/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_map.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory mapping module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/elf.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/file.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/shm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>
#include <vm/uma.h>

/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a self-adjusting binary search tree of these
 *	entries is used to speed up lookups.
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *
 *	As mentioned above, virtual copy operations are performed
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 */

static struct mtx map_sleep_mtx;
static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t vmspace_zone;
static int vmspace_zinit(void *mem, int size, int flags);
static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
    vm_offset_t max);
static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
    vm_map_entry_t gap_entry);
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
#ifdef INVARIANTS
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
    vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
    int cow);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);

#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
    ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
     !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))

/*
 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
 * stable.
 */
#define PROC_VMSPACE_LOCK(p) do { } while (0)
#define PROC_VMSPACE_UNLOCK(p) do { } while (0)

/*
 *	VM_MAP_RANGE_CHECK:	[ internal use only ]
 *
 *	Asserts that the starting and ending region
 *	addresses fall within the valid range of the map.
 */
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
		{					\
		if (start < vm_map_min(map))		\
			start = vm_map_min(map);	\
		if (end > vm_map_max(map))		\
			end = vm_map_max(map);		\
		if (start > end)			\
			start = end;			\
		}
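
/*
 * A sketch of typical usage: the range is clamped under the map lock
 * before the map is modified, as in vm_map_remove():
 *
 *	vm_map_lock(map);
 *	VM_MAP_RANGE_CHECK(map, start, end);
 *	(void)vm_map_delete(map, start, end);
 *	vm_map_unlock(map);
 */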

#ifndef UMA_MD_SMALL_ALLOC

/*
 * Allocate a new slab for kernel map entries.  The kernel map may be locked or
 * unlocked, depending on whether the request is coming from the kernel map or a
 * submap.  This function allocates a virtual address range directly from the
 * kernel map instead of the kmem_* layer to avoid recursion on the kernel map
 * lock and also to avoid triggering allocator recursion in the vmem boundary
 * tag allocator.
 */
static void *
kmapent_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	vm_offset_t addr;
	int error, locked;

	*pflag = UMA_SLAB_PRIV;

	if (!(locked = vm_map_locked(kernel_map)))
		vm_map_lock(kernel_map);
	addr = vm_map_findspace(kernel_map, vm_map_min(kernel_map), bytes);
	if (addr + bytes < addr || addr + bytes > vm_map_max(kernel_map))
		panic("%s: kernel map is exhausted", __func__);
	error = vm_map_insert(kernel_map, NULL, 0, addr, addr + bytes,
	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
	if (error != KERN_SUCCESS)
		panic("%s: vm_map_insert() failed: %d", __func__, error);
	if (!locked)
		vm_map_unlock(kernel_map);
	error = kmem_back_domain(domain, kernel_object, addr, bytes, M_NOWAIT |
	    M_USE_RESERVE | (wait & M_ZERO));
	if (error == KERN_SUCCESS) {
		return ((void *)addr);
	} else {
		if (!locked)
			vm_map_lock(kernel_map);
		vm_map_delete(kernel_map, addr, addr + bytes);
		if (!locked)
			vm_map_unlock(kernel_map);
		return (NULL);
	}
}

static void
kmapent_free(void *item, vm_size_t size, uint8_t pflag)
{
	vm_offset_t addr;
	int error;

	if ((pflag & UMA_SLAB_PRIV) == 0)
		/* XXX leaked */
		return;

	addr = (vm_offset_t)item;
	kmem_unback(kernel_object, addr, size);
	error = vm_map_remove(kernel_map, addr, addr + size);
	KASSERT(error == KERN_SUCCESS,
	    ("%s: vm_map_remove failed: %d", __func__, error));
}

/*
 * The worst-case upper bound on the number of kernel map entries that may be
 * created before the zone must be replenished in _vm_map_unlock().
 */
#define	KMAPENT_RESERVE		1

#endif /* !UMA_MD_SMALL_ALLOC */

/*
 *	vm_map_startup:
 *
 *	Initialize the vm_map module.  Must be called before any other vm_map
 *	routines.
 *
 *	User map and entry structures are allocated from the general purpose
 *	memory pool.  Kernel maps are statically defined.  Kernel map entries
 *	require special handling to avoid recursion; see the comments above
 *	kmapent_alloc() and in vm_map_entry_create().
 */
void
vm_map_startup(void)
{
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);

	/*
	 * Disable the use of per-CPU buckets: map entry allocation is
	 * serialized by the kernel map lock.
	 */
	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOBUCKET);
#ifndef UMA_MD_SMALL_ALLOC
	/* Reserve an extra map entry for use when replenishing the reserve. */
	uma_zone_reserve(kmapentzone, KMAPENT_RESERVE + 1);
	uma_prealloc(kmapentzone, KMAPENT_RESERVE + 1);
	uma_zone_set_allocf(kmapentzone, kmapent_alloc);
	uma_zone_set_freef(kmapentzone, kmapent_free);
#endif

	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
#ifdef INVARIANTS
	    vmspace_zdtor,
#else
	    NULL,
#endif
	    vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
}

static int
vmspace_zinit(void *mem, int size, int flags)
{
	struct vmspace *vm;
	vm_map_t map;

	vm = (struct vmspace *)mem;
	map = &vm->vm_map;

	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
	PMAP_LOCK_INIT(vmspace_pmap(vm));
	return (0);
}

#ifdef INVARIANTS
static void
vmspace_zdtor(void *mem, int size, void *arg)
{
	struct vmspace *vm;

	vm = (struct vmspace *)mem;
	KASSERT(vm->vm_map.nentries == 0,
	    ("vmspace %p nentries == %d on free", vm, vm->vm_map.nentries));
	KASSERT(vm->vm_map.size == 0,
	    ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
}
#endif	/* INVARIANTS */

/*
 * Allocate a vmspace structure, including a vm_map and pmap,
 * and initialize those structures.  The refcnt is set to 1.
 */
struct vmspace *
vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
{
	struct vmspace *vm;

	vm = uma_zalloc(vmspace_zone, M_WAITOK);
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	if (!pinit(vmspace_pmap(vm))) {
		uma_zfree(vmspace_zone, vm);
		return (NULL);
	}
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
	_vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
	refcount_init(&vm->vm_refcnt, 1);
	vm->vm_shm = NULL;
	vm->vm_swrss = 0;
	vm->vm_tsize = 0;
	vm->vm_dsize = 0;
	vm->vm_ssize = 0;
	vm->vm_taddr = 0;
	vm->vm_daddr = 0;
	vm->vm_maxsaddr = 0;
	return (vm);
}

#ifdef RACCT
static void
vmspace_container_reset(struct proc *p)
{

	PROC_LOCK(p);
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
	PROC_UNLOCK(p);
}
#endif

static inline void
vmspace_dofree(struct vmspace *vm)
{

	CTR1(KTR_VM, "vmspace_free: %p", vm);

	/*
	 * Make sure any SysV shm is freed, it might not have been in
	 * exit1().
	 */
	shmexit(vm);

	/*
	 * Lock the map, to wait out all other references to it.
	 * Delete all of the mappings and pages they hold, then call
	 * the pmap module to reclaim anything left.
	 */
	(void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
	    vm_map_max(&vm->vm_map));

	pmap_release(vmspace_pmap(vm));
	vm->vm_map.pmap = NULL;
	uma_zfree(vmspace_zone, vm);
}

void
vmspace_free(struct vmspace *vm)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");

	if (refcount_release(&vm->vm_refcnt))
		vmspace_dofree(vm);
}

void
vmspace_exitfree(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	p->p_vmspace = NULL;
	PROC_VMSPACE_UNLOCK(p);
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	vmspace_free(vm);
}

void
vmspace_exit(struct thread *td)
{
	struct vmspace *vm;
	struct proc *p;
	bool released;

	p = td->td_proc;
	vm = p->p_vmspace;

	/*
	 * Prepare to release the vmspace reference.  The thread that releases
	 * the last reference is responsible for tearing down the vmspace.
	 * However, threads not releasing the final reference must switch to
	 * the kernel's vmspace0 before the decrement so that the subsequent
	 * pmap deactivation does not modify a freed vmspace.
	 */
	refcount_acquire(&vmspace0.vm_refcnt);
	if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
		if (p->p_vmspace != &vmspace0) {
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = &vmspace0;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		released = refcount_release(&vm->vm_refcnt);
	}
	if (released) {
		/*
		 * pmap_remove_pages() expects the pmap to be active, so switch
		 * back first if necessary.
		 */
		if (p->p_vmspace != vm) {
			PROC_VMSPACE_LOCK(p);
			p->p_vmspace = vm;
			PROC_VMSPACE_UNLOCK(p);
			pmap_activate(td);
		}
		pmap_remove_pages(vmspace_pmap(vm));
		PROC_VMSPACE_LOCK(p);
		p->p_vmspace = &vmspace0;
		PROC_VMSPACE_UNLOCK(p);
		pmap_activate(td);
		vmspace_dofree(vm);
	}
#ifdef RACCT
	if (racct_enable)
		vmspace_container_reset(p);
#endif
}

/* Acquire reference to vmspace owned by another process. */
struct vmspace *
vmspace_acquire_ref(struct proc *p)
{
	struct vmspace *vm;

	PROC_VMSPACE_LOCK(p);
	vm = p->p_vmspace;
	if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
		PROC_VMSPACE_UNLOCK(p);
		return (NULL);
	}
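	/*
	 * The process may have switched to a different vmspace (e.g.,
	 * during execve()) between the lookup above and the reference
	 * acquisition; if so, drop the new reference and fail.
	 */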
	if (vm != p->p_vmspace) {
		PROC_VMSPACE_UNLOCK(p);
		vmspace_free(vm);
		return (NULL);
	}
	PROC_VMSPACE_UNLOCK(p);
	return (vm);
}

/*
 * Switch between vmspaces in an AIO kernel process.
 *
 * The new vmspace is either the vmspace of a user process obtained
 * from an active AIO request or the initial vmspace of the AIO kernel
 * process (when it is idling).  Because user processes will block to
 * drain any active AIO requests before proceeding in exit() or
 * execve(), the reference count for vmspaces from AIO requests can
 * never be 0.  Similarly, AIO kernel processes hold an extra
 * reference on their initial vmspace for the life of the process.  As
 * a result, the 'newvm' vmspace always has a non-zero reference
 * count.  This permits an additional reference on 'newvm' to be
 * acquired via a simple atomic increment rather than the loop in
 * vmspace_acquire_ref() above.
 */
void
vmspace_switch_aio(struct vmspace *newvm)
{
	struct vmspace *oldvm;

	/* XXX: Need some way to assert that this is an aio daemon. */

	KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));

	oldvm = curproc->p_vmspace;
	if (oldvm == newvm)
		return;

	/*
	 * Point to the new address space and refer to it.
	 */
	curproc->p_vmspace = newvm;
	refcount_acquire(&newvm->vm_refcnt);

	/* Activate the new mapping. */
	pmap_activate(curthread);

	vmspace_free(oldvm);
}
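
/*
 * A sketch of the intended usage, with "uvm" standing for a user
 * process's vmspace taken from an AIO request and "myvm" for the
 * daemon's saved initial vmspace (both names illustrative):
 *
 *	vmspace_switch_aio(uvm);	... service the request ...
 *	vmspace_switch_aio(myvm);	... return to idling ...
 */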

void
_vm_map_lock(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	map->timestamp++;
}

void
vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
{
	vm_object_t object;
	struct vnode *vp;
	bool vp_held;

	if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
		return;
	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
	    ("Submap with execs"));
	object = entry->object.vm_object;
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
	if ((object->flags & OBJ_ANON) != 0)
		object = object->handle;
	else
		KASSERT(object->backing_object == NULL,
		    ("non-anon object %p shadows", object));
	KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
	    entry, entry->object.vm_object));

	/*
	 * Mostly, we do not lock the backing object.  It is
	 * referenced by the entry we are processing, so it cannot go
	 * away.
	 */
	vm_pager_getvp(object, &vp, &vp_held);
	if (vp != NULL) {
		if (add) {
			VOP_SET_TEXT_CHECKED(vp);
		} else {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			VOP_UNSET_TEXT_CHECKED(vp);
			VOP_UNLOCK(vp);
		}
		if (vp_held)
			vdrop(vp);
	}
}

/*
 * Use a different name for this vm_map_entry field when its use
 * is not consistent with its use as part of an ordered search tree.
 */
#define defer_next right

static void
vm_map_process_deferred(void)
{
	struct thread *td;
	vm_map_entry_t entry, next;
	vm_object_t object;

	td = curthread;
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->defer_next;
		MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
		    MAP_ENTRY_VN_EXEC));
		if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
			/*
			 * Decrement the object's writemappings and
			 * possibly the vnode's v_writecount.
			 */
			KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
			    ("Submap with writecount"));
			object = entry->object.vm_object;
			KASSERT(object != NULL, ("No object for writecount"));
			vm_pager_release_writecount(object, entry->start,
			    entry->end);
		}
		vm_map_entry_set_vnode_text(entry, false);
		vm_map_entry_deallocate(entry, FALSE);
		entry = next;
	}
}

#ifdef INVARIANTS
static void
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);
}

#define	VM_MAP_ASSERT_LOCKED(map) \
    _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)

enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
#ifdef DIAGNOSTIC
static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
#else
static int enable_vmmap_check = VMMAP_CHECK_NONE;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");

static void _vm_map_assert_consistent(vm_map_t map, int check);

#define VM_MAP_ASSERT_CONSISTENT(map) \
    _vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
#ifdef DIAGNOSTIC
#define VM_MAP_UNLOCK_CONSISTENT(map) do {				\
	if (map->nupdates > map->nentries) {				\
		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);	\
		map->nupdates = 0;					\
	}								\
} while (0)
#else
#define VM_MAP_UNLOCK_CONSISTENT(map)
#endif
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define VM_MAP_ASSERT_CONSISTENT(map)
#define VM_MAP_UNLOCK_CONSISTENT(map)
#endif /* INVARIANTS */

void
_vm_map_unlock(vm_map_t map, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	if (map->system_map) {
#ifndef UMA_MD_SMALL_ALLOC
		if (map == kernel_map && (map->flags & MAP_REPLENISH) != 0) {
			uma_prealloc(kmapentzone, 1);
			map->flags &= ~MAP_REPLENISH;
		}
#endif
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_xunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

void
_vm_map_lock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
}

void
_vm_map_unlock_read(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_sunlock_(&map->lock, file, line);
		vm_map_process_deferred();
	}
}

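/*
 *	_vm_map_trylock:
 *
 *	Tries to acquire a write (exclusive) lock on the specified map
 *	without sleeping.  Returns a non-zero value if the lock was
 *	acquired and "0" otherwise.
 */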
int
_vm_map_trylock(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	if (error == 0)
		map->timestamp++;
	return (error == 0);
}

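/*
 *	_vm_map_trylock_read:
 *
 *	Tries to lock the specified map for reading without sleeping.
 *	Returns a non-zero value if the lock was acquired and "0"
 *	otherwise.
 */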
int
_vm_map_trylock_read(vm_map_t map, const char *file, int line)
{
	int error;

	error = map->system_map ?
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	return (error == 0);
}

/*
 *	_vm_map_lock_upgrade:	[ internal use only ]
 *
 *	Tries to upgrade a read (shared) lock on the specified map to a write
 *	(exclusive) lock.  Returns the value "0" if the upgrade succeeds and a
 *	non-zero value if the upgrade fails.  If the upgrade fails, the map is
 *	returned without a read or write lock held.
 *
 *	Requires that the map be read locked.
 */
int
_vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
{
	unsigned int last_timestamp;

	if (map->system_map) {
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			last_timestamp = map->timestamp;
			sx_sunlock_(&map->lock, file, line);
			vm_map_process_deferred();
			/*
			 * If the map's timestamp does not change while the
			 * map is unlocked, then the upgrade succeeds.
			 */
			sx_xlock_(&map->lock, file, line);
			if (last_timestamp != map->timestamp) {
				sx_xunlock_(&map->lock, file, line);
				return (1);
			}
		}
	}
	map->timestamp++;
	return (0);
}

void
_vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
{

	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	} else {
		VM_MAP_UNLOCK_CONSISTENT(map);
		sx_downgrade_(&map->lock, file, line);
	}
}

/*
 *	vm_map_locked:
 *
 *	Returns a non-zero value if the caller holds a write (exclusive) lock
 *	on the specified map and the value "0" otherwise.
 */
int
vm_map_locked(vm_map_t map)
{

	if (map->system_map)
		return (mtx_owned(&map->system_mtx));
	else
		return (sx_xlocked(&map->lock));
}

/*
 *	_vm_map_unlock_and_wait:
 *
 *	Atomically releases the lock on the specified map and puts the calling
 *	thread to sleep.  The calling thread will remain asleep until either
 *	vm_map_wakeup() is performed on the map or the specified timeout is
 *	exceeded.
 *
 *	WARNING!  This function does not perform deferred deallocations of
 *	objects and map entries.  Therefore, the calling thread is expected to
 *	reacquire the map lock after reawakening and later perform an ordinary
 *	unlock operation, such as vm_map_unlock(), before completing its
 *	operation on the map.
 */
int
_vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
{

	VM_MAP_UNLOCK_CONSISTENT(map);
	mtx_lock(&map_sleep_mtx);
	if (map->system_map) {
		KASSERT((map->flags & MAP_REPLENISH) == 0,
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_xunlock_(&map->lock, file, line);
	}
	return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
	    timo));
}

/*
 *	vm_map_wakeup:
 *
 *	Awaken any threads that have slept on the map using
 *	vm_map_unlock_and_wait().
 */
void
vm_map_wakeup(vm_map_t map)
{

	/*
	 * Acquire and release map_sleep_mtx to prevent a wakeup()
	 * from being performed (and lost) between the map unlock
	 * and the msleep() in _vm_map_unlock_and_wait().
	 */
	mtx_lock(&map_sleep_mtx);
	mtx_unlock(&map_sleep_mtx);
	wakeup(&map->root);
}

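/*
 *	vm_map_busy:
 *
 *	Increment the map's busy count.  While the count is non-zero,
 *	threads calling vm_map_wait_busy() sleep rather than proceed to
 *	modify the map.
 */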
void
vm_map_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	map->busy++;
}

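/*
 *	vm_map_unbusy:
 *
 *	Decrement the map's busy count, and wake up any threads sleeping
 *	in vm_map_wait_busy() when the count drops to zero.
 */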
void
vm_map_unbusy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
	if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
		vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
		wakeup(&map->busy);
	}
}

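/*
 *	vm_map_wait_busy:
 *
 *	Sleep, temporarily releasing the map lock, until the map's busy
 *	count drains to zero.  The map's timestamp is advanced before
 *	returning.
 */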
void
vm_map_wait_busy(vm_map_t map)
{

	VM_MAP_ASSERT_LOCKED(map);
	while (map->busy) {
		vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
		if (map->system_map)
			msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
		else
			sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	}
	map->timestamp++;
}

long
vmspace_resident_count(struct vmspace *vmspace)
{
	return (pmap_resident_count(vmspace_pmap(vmspace)));
}

/*
 * Initialize an existing vm_map structure
 * such as that in the vmspace structure.
 */
static void
_vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	map->header.eflags = MAP_ENTRY_HEADER;
	map->needs_wakeup = FALSE;
	map->system_map = 0;
	map->pmap = pmap;
	map->header.end = min;
	map->header.start = max;
	map->flags = 0;
	map->header.left = map->header.right = &map->header;
	map->root = NULL;
	map->timestamp = 0;
	map->busy = 0;
	map->anon_loc = 0;
#ifdef DIAGNOSTIC
	map->nupdates = 0;
#endif
}

void
vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
{

	_vm_map_init(map, pmap, min, max);
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
}

/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 */
static void
vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
{
	uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
}

/*
 *	vm_map_entry_create:	[ internal use only ]
 *
 *	Allocates a VM map entry for insertion.
 *	No entry fields are filled in.
 */
static vm_map_entry_t
vm_map_entry_create(vm_map_t map)
{
	vm_map_entry_t new_entry;

#ifndef UMA_MD_SMALL_ALLOC
	if (map == kernel_map) {
		VM_MAP_ASSERT_LOCKED(map);

		/*
		 * A new slab of kernel map entries cannot be allocated at this
		 * point because the kernel map has not yet been updated to
		 * reflect the caller's request.  Therefore, we allocate a new
		 * map entry, dipping into the reserve if necessary, and set a
		 * flag indicating that the reserve must be replenished before
		 * the map is unlocked.
		 */
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT | M_NOVM);
		if (new_entry == NULL) {
			new_entry = uma_zalloc(kmapentzone,
			    M_NOWAIT | M_NOVM | M_USE_RESERVE);
			kernel_map->flags |= MAP_REPLENISH;
		}
	} else
#endif
	if (map->system_map) {
		new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
	} else {
		new_entry = uma_zalloc(mapentzone, M_WAITOK);
	}
	KASSERT(new_entry != NULL,
	    ("vm_map_entry_create: kernel resources exhausted"));
	return (new_entry);
}

/*
 *	vm_map_entry_set_behavior:
 *
 *	Set the expected access behavior, either normal, random, or
 *	sequential.
 */
static inline void
vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
{
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
	    (behavior & MAP_ENTRY_BEHAV_MASK);
}

/*
 *	vm_map_entry_max_free_{left,right}:
 *
 *	Compute the size of the largest free gap between two entries,
 *	one the root of a tree and the other the ancestor of that root
 *	that is the least or greatest ancestor found on the search path.
 */
static inline vm_size_t
vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
{

	return (root->left != left_ancestor ?
	    root->left->max_free : root->start - left_ancestor->end);
}

static inline vm_size_t
vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
{

	return (root->right != right_ancestor ?
	    root->right->max_free : right_ancestor->start - root->end);
}

/*
 *	vm_map_entry_{pred,succ}:
 *
 *	Find the {predecessor, successor} of the entry by taking one step
 *	in the appropriate direction and backtracking as much as necessary.
 *	vm_map_entry_succ is defined in vm_map.h.
 */
static inline vm_map_entry_t
vm_map_entry_pred(vm_map_entry_t entry)
{
	vm_map_entry_t prior;

	prior = entry->left;
	if (prior->right->start < entry->start) {
		do
			prior = prior->right;
		while (prior->right != entry);
	}
	return (prior);
}

static inline vm_size_t
vm_size_max(vm_size_t a, vm_size_t b)
{

	return (a > b ? a : b);
}

#define SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {		\
	vm_map_entry_t z;						\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->right->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look right to find it.				\
	 */								\
	y = root->left;							\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_left(root, llist))	\
		max_free = vm_map_entry_max_free_right(root, rlist);	\
	if (y != llist && (test)) {					\
		/* Rotate right and make y root. */			\
		z = y->right;						\
		if (z != root) {					\
			root->left = z;					\
			y->right = root;				\
			if (max_free < y->max_free)			\
			    root->max_free = max_free =			\
			    vm_size_max(max_free, z->max_free);		\
		} else if (max_free < y->max_free)			\
			root->max_free = max_free =			\
			    vm_size_max(max_free, root->start - y->end);\
		root = y;						\
		y = root->left;						\
	}								\
	/* Copy right->max_free.  Put root on rlist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free not copied from right", __func__));		\
	root->left = rlist;						\
	rlist = root;							\
	root = y != llist ? y : NULL;					\
} while (0)

#define SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {		\
	vm_map_entry_t z;						\
	vm_size_t max_free;						\
									\
	/*								\
	 * Infer root->left->max_free == root->max_free when		\
	 * y->max_free < root->max_free || root->max_free == 0.	\
	 * Otherwise, look left to find it.				\
	 */								\
	y = root->right;						\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))	\
		max_free = vm_map_entry_max_free_left(root, llist);	\
	if (y != rlist && (test)) {					\
		/* Rotate left and make y root. */			\
		z = y->left;						\
		if (z != root) {					\
			root->right = z;				\
			y->left = root;					\
			if (max_free < y->max_free)			\
			    root->max_free = max_free =			\
			    vm_size_max(max_free, z->max_free);		\
		} else if (max_free < y->max_free)			\
			root->max_free = max_free =			\
			    vm_size_max(max_free, y->start - root->end);\
		root = y;						\
		y = root->right;					\
	}								\
	/* Copy left->max_free.  Put root on llist. */			\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free not copied from left", __func__));		\
	root->right = llist;						\
	llist = root;							\
	root = y != rlist ? y : NULL;					\
} while (0)

/*
 * Walk down the tree until we find addr or a gap where addr would go, breaking
 * off left and right subtrees of nodes less than, or greater than addr.  Treat
 * subtrees with root->max_free < length as empty trees.  llist and rlist are
 * the two sides in reverse order (bottom-up), with llist linked by the right
 * pointer and rlist linked by the left pointer in the vm_map_entry, and both
 * lists terminated by &map->header.  This function, and the subsequent call to
 * vm_map_splay_merge_{left,right,pred,succ}, rely on the start and end address
 * values in &map->header.
 */
static __always_inline vm_map_entry_t
vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
    vm_map_entry_t *llist, vm_map_entry_t *rlist)
{
	vm_map_entry_t left, right, root, y;

	left = right = &map->header;
	root = map->root;
	while (root != NULL && root->max_free >= length) {
		KASSERT(left->end <= root->start &&
		    root->end <= right->start,
		    ("%s: root not within tree bounds", __func__));
		if (addr < root->start) {
			SPLAY_LEFT_STEP(root, y, left, right,
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
			SPLAY_RIGHT_STEP(root, y, left, right,
			    y->max_free >= length && addr >= y->end);
		} else
			break;
	}
	*llist = left;
	*rlist = right;
	return (root);
}

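/*
 * Empty the root's right subtree onto rlist, so that afterwards *rlist
 * points to the entry that immediately follows root.
 */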
static __always_inline void
vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *rlist)
{
	vm_map_entry_t hi, right, y;

	right = *rlist;
	hi = root->right == right ? NULL : root->right;
	if (hi == NULL)
		return;
	do
		SPLAY_LEFT_STEP(hi, y, root, right, true);
	while (hi != NULL);
	*rlist = right;
}

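/*
 * Empty the root's left subtree onto llist, so that afterwards *llist
 * points to the entry that immediately precedes root.
 */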
static __always_inline void
vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *llist)
{
	vm_map_entry_t left, lo, y;

	left = *llist;
	lo = root->left == left ? NULL : root->left;
	if (lo == NULL)
		return;
	do
		SPLAY_RIGHT_STEP(lo, y, left, root, true);
	while (lo != NULL);
	*llist = left;
}

static inline void
vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
{
	vm_map_entry_t tmp;

	tmp = *b;
	*b = *a;
	*a = tmp;
}

/*
 * Walk back up the two spines, flip the pointers and set max_free.  The
 * subtrees of the root go at the bottom of llist and rlist.
 */
static vm_size_t
vm_map_splay_merge_left_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t llist)
{
	do {
		/*
		 * The max_free values of the children of llist are in
		 * llist->max_free and max_free.  Update with the
		 * max value.
		 */
		llist->max_free = max_free =
		    vm_size_max(llist->max_free, max_free);
		vm_map_entry_swap(&llist->right, &tail);
		vm_map_entry_swap(&tail, &llist);
	} while (llist != header);
	root->left = tail;
	return (max_free);
}

/*
 * When llist is known to be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_pred(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = root->start - llist->end;
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root, max_free, llist);
	} else {
		root->left = header;
		header->right = root;
	}
	return (max_free);
}

/*
 * When llist may or may not be the predecessor of root.
 */
static inline vm_size_t
vm_map_splay_merge_left(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t llist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_left(root, llist);
	if (llist != header) {
		max_free = vm_map_splay_merge_left_walk(header, root,
		    root->left == llist ? root : root->left,
		    max_free, llist);
	}
	return (max_free);
}

static vm_size_t
vm_map_splay_merge_right_walk(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t tail, vm_size_t max_free, vm_map_entry_t rlist)
{
	do {
		/*
		 * The max_free values of the children of rlist are in
		 * rlist->max_free and max_free.  Update with the
		 * max value.
		 */
		rlist->max_free = max_free =
		    vm_size_max(rlist->max_free, max_free);
		vm_map_entry_swap(&rlist->left, &tail);
		vm_map_entry_swap(&tail, &rlist);
	} while (rlist != header);
	root->right = tail;
	return (max_free);
}

/*
 * When rlist is known to be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_succ(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = rlist->start - root->end;
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root, max_free, rlist);
	} else {
		root->right = header;
		header->left = root;
	}
	return (max_free);
}

/*
 * When rlist may or may not be the successor of root.
 */
static inline vm_size_t
vm_map_splay_merge_right(vm_map_entry_t header, vm_map_entry_t root,
    vm_map_entry_t rlist)
{
	vm_size_t max_free;

	max_free = vm_map_entry_max_free_right(root, rlist);
	if (rlist != header) {
		max_free = vm_map_splay_merge_right_walk(header, root,
		    root->right == rlist ? root : root->right,
		    max_free, rlist);
	}
	return (max_free);
}

/*
 *	vm_map_splay:
 *
 *	The Sleator and Tarjan top-down splay algorithm with the
 *	following variation.  Max_free must be computed bottom-up, so
 *	on the downward pass, maintain the left and right spines in
 *	reverse order.  Then, make a second pass up each side to fix
 *	the pointers and compute max_free.  The time bound is O(log n)
 *	amortized.
 *
 *	The tree is threaded, which means that there are no null pointers.
 *	When a node has no left child, its left pointer points to its
 *	predecessor, which is the last ancestor on the search path from the root
 *	where the search branched right.  Likewise, when a node has no right
 *	child, its right pointer points to its successor.  The map header node
 *	is the predecessor of the first map entry, and the successor of the
 *	last.
 *
 *	The new root is the vm_map_entry containing "addr", or else an
 *	adjacent entry (lower if possible) if addr is not in the tree.
 *
 *	The map must be locked, and leaves it so.
 *
 *	Returns: the new root.
 */
static vm_map_entry_t
vm_map_splay(vm_map_t map, vm_offset_t addr)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	header = &map->header;
	root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
	if (root != NULL) {
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else if (llist != header) {
		/*
		 * Recover the greatest node in the left
		 * subtree and make it the root.
		 */
		root = llist;
		llist = root->right;
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
	} else if (rlist != header) {
		/*
		 * Recover the least node in the right
		 * subtree and make it the root.
		 */
		root = rlist;
		rlist = root->left;
		max_free_left = vm_map_splay_merge_pred(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else {
		/* There is no root. */
		return (NULL);
	}
	root->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	return (root);
}

/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps.  On linking, if new entry clips
 *	existing entry, trim existing entry to avoid overlap, and manage
 *	offsets.  On unlinking, merge disappearing entry with neighbor, if
 *	called for, and manage offsets.  Callers should not modify fields in
 *	entries already mapped.
 */
static void
vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
	VM_MAP_ASSERT_LOCKED(map);
	map->nentries++;
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	if (root == NULL) {
		/*
		 * The new entry does not overlap any existing entry in the
		 * map, so it becomes the new root of the map tree.
		 */
		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
	} else if (entry->start == root->start) {
		/*
		 * The new entry is a clone of root, with only the end field
		 * changed.  The root entry will be shrunk to abut the new
		 * entry, and will be the right child of the new root entry in
		 * the modified map.
		 */
		KASSERT(entry->end < root->end,
		    ("%s: clip_start not within entry", __func__));
		vm_map_splay_findprev(root, &llist);
		root->offset += entry->end - root->start;
		root->start = entry->end;
		max_free_left = vm_map_splay_merge_pred(header, entry, llist);
		max_free_right = root->max_free = vm_size_max(
		    vm_map_splay_merge_pred(entry, root, entry),
		    vm_map_splay_merge_right(header, root, rlist));
	} else {
		/*
		 * The new entry is a clone of root, with only the start field
		 * changed.  The root entry will be shrunk to abut the new
		 * entry, and will be the left child of the new root entry in
		 * the modified map.
		 */
		KASSERT(entry->end == root->end,
		    ("%s: clip_start not within entry", __func__));
		vm_map_splay_findnext(root, &rlist);
		entry->offset += entry->start - root->start;
		root->end = entry->start;
		max_free_left = root->max_free = vm_size_max(
		    vm_map_splay_merge_left(header, root, llist),
		    vm_map_splay_merge_succ(entry, root, entry));
		max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
	}
	entry->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = entry;
	VM_MAP_ASSERT_CONSISTENT(map);
}

enum unlink_merge_type {
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};

static void
vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
    enum unlink_merge_type op)
{
	vm_map_entry_t header, llist, rlist, root;
	vm_size_t max_free_left, max_free_right;

	VM_MAP_ASSERT_LOCKED(map);
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	vm_map_splay_findprev(root, &llist);
	vm_map_splay_findnext(root, &rlist);
	if (op == UNLINK_MERGE_NEXT) {
		rlist->start = root->start;
		rlist->offset = root->offset;
	}
	if (llist != header) {
		root = llist;
		llist = root->right;
		max_free_left = vm_map_splay_merge_left(header, root, llist);
		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
	} else if (rlist != header) {
		root = rlist;
		rlist = root->left;
		max_free_left = vm_map_splay_merge_pred(header, root, llist);
		max_free_right = vm_map_splay_merge_right(header, root, rlist);
	} else {
		header->left = header->right = header;
		root = NULL;
	}
	if (root != NULL)
		root->max_free = vm_size_max(max_free_left, max_free_right);
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	map->nentries--;
	CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	    map->nentries, entry);
}

/*
 *	vm_map_entry_resize:
 *
 *	Resize a vm_map_entry, recompute the amount of free space that
 *	follows it and propagate that value up the tree.
 *
 *	The map must be locked, and leaves it so.
 */
static void
vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount)
{
	vm_map_entry_t header, llist, rlist, root;

	VM_MAP_ASSERT_LOCKED(map);
	header = &map->header;
	root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
	KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
	vm_map_splay_findnext(root, &rlist);
	entry->end += grow_amount;
	root->max_free = vm_size_max(
	    vm_map_splay_merge_left(header, root, llist),
	    vm_map_splay_merge_succ(header, root, rlist));
	map->root = root;
	VM_MAP_ASSERT_CONSISTENT(map);
	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
	    __func__, map, map->nentries, entry);
}

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Finds the map entry containing (or
 *	immediately preceding) the specified address
 *	in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	vm_map_t map,
	vm_offset_t address,
	vm_map_entry_t *entry)	/* OUT */
{
	vm_map_entry_t cur, header, lbound, ubound;
	boolean_t locked;

	/*
	 * If the map is empty, then the map entry immediately preceding
	 * "address" is the map's header.
	 */
	header = &map->header;
	cur = map->root;
	if (cur == NULL) {
		*entry = header;
		return (FALSE);
	}
	if (address >= cur->start && cur->end > address) {
		*entry = cur;
		return (TRUE);
	}
	if ((locked = vm_map_locked(map)) ||
	    sx_try_upgrade(&map->lock)) {
		/*
		 * Splay requires a write lock on the map.  However, it only
		 * restructures the binary search tree; it does not otherwise
		 * change the map.  Thus, the map's timestamp need not change
		 * on a temporary upgrade.
		 */
		cur = vm_map_splay(map, address);
		if (!locked) {
			VM_MAP_UNLOCK_CONSISTENT(map);
			sx_downgrade(&map->lock);
		}

		/*
		 * If "address" is contained within a map entry, the new root
		 * is that map entry.  Otherwise, the new root is a map entry
		 * immediately before or after "address".
		 */
		if (address < cur->start) {
			*entry = header;
			return (FALSE);
		}
		*entry = cur;
		return (address < cur->end);
	}
	/*
	 * Since the map is only locked for read access, perform a
	 * standard binary search tree lookup for "address".
	 */
	lbound = ubound = header;
	for (;;) {
		if (address < cur->start) {
			ubound = cur;
			cur = cur->left;
			if (cur == lbound)
				break;
		} else if (cur->end <= address) {
			lbound = cur;
			cur = cur->right;
			if (cur == ubound)
				break;
		} else {
			*entry = cur;
			return (TRUE);
		}
	}
	*entry = lbound;
	return (FALSE);
}
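
/*
 * A sketch of the lookup contract, with the map at least read locked:
 *
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		... entry contains addr: entry->start <= addr < entry->end
 *	} else {
 *		... entry is the closest preceding entry (or the header),
 *		... and vm_map_entry_succ(entry) begins above addr
 *	}
 */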

/*
 *	vm_map_insert:
 *
 *	Inserts the given whole VM object into the target
 *	map at the specified address range.  The object's
 *	size should match that of the address range.
 *
 *	Requires that the map be locked, and leaves it so.
 *
 *	If object is non-NULL, ref count must be bumped by caller
 *	prior to making call to account for the new entry.
 */
int
vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
{
	vm_map_entry_t new_entry, next_entry, prev_entry;
	struct ucred *cred;
	vm_eflags_t protoeflags;
	vm_inherit_t inheritance;
	u_long bdry;
	u_int bidx;

	VM_MAP_ASSERT_LOCKED(map);
	KASSERT(object != kernel_object ||
	    (cow & MAP_COPY_ON_WRITE) == 0,
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 ||
	    (cow & MAP_SPLIT_BOUNDARY_MASK) != 0,
	    ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x",
	    object, cow));
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));

	/*
	 * Check that the start and end points are not bogus.
	 */
	if (start == end || !vm_map_range_valid(map, start, end))
		return (KERN_INVALID_ADDRESS);

	if ((map->flags & MAP_WXORX) != 0 && (prot & (VM_PROT_WRITE |
	    VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE))
		return (KERN_PROTECTION_FAILURE);

	/*
	 * Find the entry prior to the proposed starting address; if it's part
	 * of an existing entry, this range is bogus.
	 */
	if (vm_map_lookup_entry(map, start, &prev_entry))
		return (KERN_NO_SPACE);

	/*
	 * Assert that the next entry doesn't overlap the end point.
	 */
	next_entry = vm_map_entry_succ(prev_entry);
	if (next_entry->start < end)
		return (KERN_NO_SPACE);

	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
	    max != VM_PROT_NONE))
		return (KERN_INVALID_ARGUMENT);

	protoeflags = 0;
	if (cow & MAP_COPY_ON_WRITE)
		protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
	if (cow & MAP_NOFAULT)
		protoeflags |= MAP_ENTRY_NOFAULT;
	if (cow & MAP_DISABLE_SYNCER)
		protoeflags |= MAP_ENTRY_NOSYNC;
	if (cow & MAP_DISABLE_COREDUMP)
		protoeflags |= MAP_ENTRY_NOCOREDUMP;
	if (cow & MAP_STACK_GROWS_DOWN)
		protoeflags |= MAP_ENTRY_GROWS_DOWN;
	if (cow & MAP_STACK_GROWS_UP)
		protoeflags |= MAP_ENTRY_GROWS_UP;
	if (cow & MAP_WRITECOUNT)
		protoeflags |= MAP_ENTRY_WRITECNT;
	if (cow & MAP_VN_EXEC)
		protoeflags |= MAP_ENTRY_VN_EXEC;
	if ((cow & MAP_CREATE_GUARD) != 0)
		protoeflags |= MAP_ENTRY_GUARD;
	if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_DN;
	if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
		protoeflags |= MAP_ENTRY_STACK_GAP_UP;
	if (cow & MAP_INHERIT_SHARE)
		inheritance = VM_INHERIT_SHARE;
	else
		inheritance = VM_INHERIT_DEFAULT;
	if ((cow & MAP_SPLIT_BOUNDARY_MASK) != 0) {
		/* This magically ignores index 0, for usual page size. */
		bidx = (cow & MAP_SPLIT_BOUNDARY_MASK) >>
		    MAP_SPLIT_BOUNDARY_SHIFT;
		if (bidx >= MAXPAGESIZES)
			return (KERN_INVALID_ARGUMENT);
		bdry = pagesizes[bidx] - 1;
		if ((start & bdry) != 0 || (end & bdry) != 0)
			return (KERN_INVALID_ARGUMENT);
		protoeflags |= bidx << MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	}

	cred = NULL;
	if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
		goto charged;
	if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
	    ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
		if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
			return (KERN_RESOURCE_SHORTAGE);
		KASSERT(object == NULL ||
		    (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
		    object->cred == NULL,
		    ("overcommit: vm_map_insert o %p", object));
		cred = curthread->td_ucred;
	}

charged:
	/* Expand the kernel pmap, if necessary. */
	if (map == kernel_map && end > kernel_vm_end)
		pmap_growkernel(end);
	if (object != NULL) {
		/*
		 * OBJ_ONEMAPPING must be cleared unless this mapping
		 * is trivially proven to be the only mapping for any
		 * of the object's pages.  (Object granularity
		 * reference counting is insufficient to recognize
		 * aliases with precision.)
		 */
		if ((object->flags & OBJ_ANON) != 0) {
			VM_OBJECT_WLOCK(object);
			if (object->ref_count > 1 || object->shadow_count != 0)
				vm_object_clear_flag(object, OBJ_ONEMAPPING);
			VM_OBJECT_WUNLOCK(object);
		}
	} else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
	    protoeflags &&
	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
	    MAP_VN_EXEC)) == 0 &&
	    prev_entry->end == start && (prev_entry->cred == cred ||
	    (prev_entry->object.vm_object != NULL &&
	    prev_entry->object.vm_object->cred == cred)) &&
	    vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), cred != NULL &&
	    (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
		/*
		 * We were able to extend the object.  Determine if we
		 * can extend the previous map entry to include the
		 * new range as well.
		 */
		if (prev_entry->inheritance == inheritance &&
		    prev_entry->protection == prot &&
		    prev_entry->max_protection == max &&
		    prev_entry->wired_count == 0) {
			KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
			    0, ("prev_entry %p has incoherent wiring",
			    prev_entry));
			if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
				map->size += end - prev_entry->end;
			vm_map_entry_resize(map, prev_entry,
			    end - prev_entry->end);
			vm_map_try_merge_entries(map, prev_entry, next_entry);
			return (KERN_SUCCESS);
		}

		/*
		 * If we can extend the object but cannot extend the
		 * map entry, we have to create a new map entry.  We
		 * must bump the ref count on the extended object to
		 * account for it.  object may be NULL.
		 */
		object = prev_entry->object.vm_object;
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
		vm_object_reference(object);
		if (cred != NULL && object != NULL && object->cred != NULL &&
		    !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
			/* Object already accounts for this uid. */
			cred = NULL;
		}
	}
	if (cred != NULL)
		crhold(cred);

	/*
	 * Create a new entry
	 */
	new_entry = vm_map_entry_create(map);
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;

	new_entry->eflags = protoeflags;
	new_entry->object.vm_object = object;
	new_entry->offset = offset;

	new_entry->inheritance = inheritance;
	new_entry->protection = prot;
	new_entry->max_protection = max;
	new_entry->wired_count = 0;
	new_entry->wiring_thread = NULL;
	new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
	new_entry->next_read = start;

	KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;

	/*
	 * Insert the new entry into the list
	 */
	vm_map_entry_link(map, new_entry);
	if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
		map->size += new_entry->end - new_entry->start;

	/*
	 * Try to coalesce the new entry with both the previous and next
	 * entries in the list.  Previously, we only attempted to coalesce
	 * with the previous entry when object is NULL.  Here, we handle the
	 * other cases, which are less common.
	 */
	vm_map_try_merge_entries(map, prev_entry, new_entry);
	vm_map_try_merge_entries(map, new_entry, next_entry);

	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
		    end - start, cow & MAP_PREFAULT_PARTIAL);
	}

	return (KERN_SUCCESS);
}
1834
1835/*
1836 *	vm_map_findspace:
1837 *
1838 *	Find the first fit (lowest VM address) for "length" free bytes
1839 *	beginning at address >= start in the given map.
1840 *
1841 *	In a vm_map_entry, "max_free" is the maximum amount of
1842 *	contiguous free space between an entry in its subtree and a
1843 *	neighbor of that entry.  This allows finding a free region in
1844 *	one path down the tree, making the search O(log n) amortized
1845 *	when splay trees are used.
1846 *
1847 *	The map must be locked on entry, and is left locked on return.
1848 *
1849 *	Returns: starting address if sufficient space,
1850 *		 vm_map_max(map)-length+1 if insufficient space.
1851 */
1852vm_offset_t
1853vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1854{
1855	vm_map_entry_t header, llist, rlist, root, y;
1856	vm_size_t left_length, max_free_left, max_free_right;
1857	vm_offset_t gap_end;
1858
1859	VM_MAP_ASSERT_LOCKED(map);
1860
1861	/*
1862	 * Request must fit within min/max VM address and must avoid
1863	 * address wrap.
1864	 */
1865	start = MAX(start, vm_map_min(map));
1866	if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1867		return (vm_map_max(map) - length + 1);
1868
1869	/* Empty tree means wide open address space. */
1870	if (map->root == NULL)
1871		return (start);
1872
1873	/*
1874	 * After splay_split, if start is within an entry, push it to the start
1875	 * of the following gap.  If rlist is at the end of the gap containing
1876	 * start, save the end of that gap in gap_end to see if the gap is big
1877	 * enough; otherwise, set gap_end to start to skip gap-checking and move
1878	 * directly to a search of the right subtree.
1879	 */
1880	header = &map->header;
1881	root = vm_map_splay_split(map, start, length, &llist, &rlist);
1882	gap_end = rlist->start;
1883	if (root != NULL) {
1884		start = root->end;
1885		if (root->right != rlist)
1886			gap_end = start;
1887		max_free_left = vm_map_splay_merge_left(header, root, llist);
1888		max_free_right = vm_map_splay_merge_right(header, root, rlist);
1889	} else if (rlist != header) {
1890		root = rlist;
1891		rlist = root->left;
1892		max_free_left = vm_map_splay_merge_pred(header, root, llist);
1893		max_free_right = vm_map_splay_merge_right(header, root, rlist);
1894	} else {
1895		root = llist;
1896		llist = root->right;
1897		max_free_left = vm_map_splay_merge_left(header, root, llist);
1898		max_free_right = vm_map_splay_merge_succ(header, root, rlist);
1899	}
1900	root->max_free = vm_size_max(max_free_left, max_free_right);
1901	map->root = root;
1902	VM_MAP_ASSERT_CONSISTENT(map);
1903	if (length <= gap_end - start)
1904		return (start);
1905
1906	/* With max_free, can immediately tell if no solution. */
1907	if (root->right == header || length > root->right->max_free)
1908		return (vm_map_max(map) - length + 1);
1909
1910	/*
1911	 * Splay for the least large-enough gap in the right subtree.
1912	 */
1913	llist = rlist = header;
1914	for (left_length = 0;;
1915	    left_length = vm_map_entry_max_free_left(root, llist)) {
1916		if (length <= left_length)
1917			SPLAY_LEFT_STEP(root, y, llist, rlist,
1918			    length <= vm_map_entry_max_free_left(y, llist));
1919		else
1920			SPLAY_RIGHT_STEP(root, y, llist, rlist,
1921			    length > vm_map_entry_max_free_left(y, root));
1922		if (root == NULL)
1923			break;
1924	}
1925	root = llist;
1926	llist = root->right;
1927	max_free_left = vm_map_splay_merge_left(header, root, llist);
1928	if (rlist == header) {
1929		root->max_free = vm_size_max(max_free_left,
1930		    vm_map_splay_merge_succ(header, root, rlist));
1931	} else {
1932		y = rlist;
1933		rlist = y->left;
1934		y->max_free = vm_size_max(
1935		    vm_map_splay_merge_pred(root, y, root),
1936		    vm_map_splay_merge_right(header, y, rlist));
1937		root->max_free = vm_size_max(max_free_left, y->max_free);
1938	}
1939	map->root = root;
1940	VM_MAP_ASSERT_CONSISTENT(map);
1941	return (root->end);
1942}
1943
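/*
 * Caller-side sketch (illustrative, not part of this file): because the
 * failure return of vm_map_findspace() is vm_map_max(map) - length + 1,
 * callers detect "no space" by checking for overflow past the map maximum:
 *
 *	addr = vm_map_findspace(map, start, length);
 *	if (addr + length > vm_map_max(map))
 *		return (KERN_NO_SPACE);
 */
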
1944int
1945vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1946    vm_offset_t start, vm_size_t length, vm_prot_t prot,
1947    vm_prot_t max, int cow)
1948{
1949	vm_offset_t end;
1950	int result;
1951
1952	end = start + length;
1953	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1954	    object == NULL,
1955	    ("vm_map_fixed: non-NULL backing object for stack"));
1956	vm_map_lock(map);
1957	VM_MAP_RANGE_CHECK(map, start, end);
1958	if ((cow & MAP_CHECK_EXCL) == 0) {
1959		result = vm_map_delete(map, start, end);
1960		if (result != KERN_SUCCESS)
1961			goto out;
1962	}
1963	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1964		result = vm_map_stack_locked(map, start, length, sgrowsiz,
1965		    prot, max, cow);
1966	} else {
1967		result = vm_map_insert(map, object, offset, start, end,
1968		    prot, max, cow);
1969	}
1970out:
1971	vm_map_unlock(map);
1972	return (result);
1973}
1974
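/*
 * Usage sketch for vm_map_fixed() above (hypothetical caller): claim a
 * fixed range atomically, failing rather than replacing what is there:
 *
 *	rv = vm_map_fixed(map, obj, 0, addr, len,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    MAP_CHECK_EXCL);
 *
 * Without MAP_CHECK_EXCL, any existing mappings in [start, start + length)
 * are deleted first.
 */
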
1975static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1976static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1977
1978static int cluster_anon = 1;
1979SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1980    &cluster_anon, 0,
1981    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
1982
1983static bool
1984clustering_anon_allowed(vm_offset_t addr)
1985{
1986
1987	switch (cluster_anon) {
1988	case 0:
1989		return (false);
1990	case 1:
1991		return (addr == 0);
1992	case 2:
1993	default:
1994		return (true);
1995	}
1996}
1997
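/*
 * For example, setting the vm.cluster_anon sysctl to 2 clusters anonymous
 * mappings even when a placement hint is given, while 0 disables
 * clustering entirely.
 */
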
1998static long aslr_restarts;
1999SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
2000    &aslr_restarts, 0,
2001    "Number of ASLR search restarts");
2002
2003/*
2004 * Searches for the specified amount of free space in the given map with the
2005 * specified alignment.  Performs an address-ordered, first-fit search from
2006 * the given address "*addr", with an optional upper bound "max_addr".  If the
2007 * parameter "alignment" is zero, then the alignment is computed from the
2008 * given (object, offset) pair so as to enable the greatest possible use of
2009 * superpage mappings.  Returns KERN_SUCCESS and the address of the free space
2010 * in "*addr" if successful.  Otherwise, returns KERN_NO_SPACE.
2011 *
2012 * The map must be locked.  Initially, there must be at least "length" bytes
2013 * of free space at the given address.
2014 */
2015static int
2016vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2017    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
2018    vm_offset_t alignment)
2019{
2020	vm_offset_t aligned_addr, free_addr;
2021
2022	VM_MAP_ASSERT_LOCKED(map);
2023	free_addr = *addr;
2024	KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
2025	    ("caller failed to provide space %#jx at address %p",
2026	     (uintmax_t)length, (void *)free_addr));
2027	for (;;) {
2028		/*
2029		 * At the start of every iteration, the free space at address
2030		 * "*addr" is at least "length" bytes.
2031		 */
2032		if (alignment == 0)
2033			pmap_align_superpage(object, offset, addr, length);
2034		else if ((*addr & (alignment - 1)) != 0) {
2035			*addr &= ~(alignment - 1);
2036			*addr += alignment;
2037		}
2038		aligned_addr = *addr;
2039		if (aligned_addr == free_addr) {
2040			/*
2041			 * Alignment did not change "*addr", so "*addr" must
2042			 * still provide sufficient free space.
2043			 */
2044			return (KERN_SUCCESS);
2045		}
2046
2047		/*
2048		 * Test for address wrap on "*addr".  A wrapped "*addr" could
2049		 * be a valid address, in which case vm_map_findspace() cannot
2050		 * be relied upon to fail.
2051		 */
2052		if (aligned_addr < free_addr)
2053			return (KERN_NO_SPACE);
2054		*addr = vm_map_findspace(map, aligned_addr, length);
2055		if (*addr + length > vm_map_max(map) ||
2056		    (max_addr != 0 && *addr + length > max_addr))
2057			return (KERN_NO_SPACE);
2058		free_addr = *addr;
2059		if (free_addr == aligned_addr) {
2060			/*
2061			 * If a successful call to vm_map_findspace() did not
2062			 * change "*addr", then "*addr" must still be aligned
2063			 * and provide sufficient free space.
2064			 */
2065			return (KERN_SUCCESS);
2066		}
2067	}
2068}
2069
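/*
 * For illustration: when "alignment" is a nonzero power of two, the
 * two-step adjustment above is equivalent to the usual round-up idiom
 *
 *	addr = (addr + alignment - 1) & ~(alignment - 1);
 *
 * applied only to misaligned addresses; the wrap that this idiom can
 * suffer is exactly what the aligned_addr < free_addr test catches.
 */
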
2070int
2071vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
2072    vm_offset_t max_addr, vm_offset_t alignment)
2073{
2074	/* XXXKIB ASLR eh ? */
2075	*addr = vm_map_findspace(map, *addr, length);
2076	if (*addr + length > vm_map_max(map) ||
2077	    (max_addr != 0 && *addr + length > max_addr))
2078		return (KERN_NO_SPACE);
2079	return (vm_map_alignspace(map, NULL, 0, addr, length, max_addr,
2080	    alignment));
2081}
2082
2083/*
2084 *	vm_map_find finds an unallocated region in the target address
2085 *	map with the given length.  The search is defined to be
2086 *	first-fit from the specified address; the region found is
2087 *	returned in the same parameter.
2088 *
2089 *	If object is non-NULL, the caller must bump its ref count
2090 *	prior to the call, to account for the new entry.
2091 */
2092int
2093vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2094	    vm_offset_t *addr,	/* IN/OUT */
2095	    vm_size_t length, vm_offset_t max_addr, int find_space,
2096	    vm_prot_t prot, vm_prot_t max, int cow)
2097{
2098	vm_offset_t alignment, curr_min_addr, min_addr;
2099	int gap, pidx, rv, try;
2100	bool cluster, en_aslr, update_anon;
2101
2102	KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
2103	    object == NULL,
2104	    ("vm_map_find: non-NULL backing object for stack"));
2105	MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
2106	    (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
2107	if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
2108	    (object->flags & OBJ_COLORED) == 0))
2109		find_space = VMFS_ANY_SPACE;
2110	if (find_space >> 8 != 0) {
2111		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
2112		alignment = (vm_offset_t)1 << (find_space >> 8);
2113	} else
2114		alignment = 0;
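	/*
	 * A sketch of the encoding assumed above: VMFS_ALIGNED_SPACE(x)
	 * stores log2 of the requested alignment in the high bits of
	 * find_space, so a 2 MB alignment request arrives as (21 << 8)
	 * and decodes here to alignment = 1 << 21.
	 */
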
2115	en_aslr = (map->flags & MAP_ASLR) != 0;
2116	update_anon = cluster = clustering_anon_allowed(*addr) &&
2117	    (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
2118	    find_space != VMFS_NO_SPACE && object == NULL &&
2119	    (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
2120	    MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
2121	curr_min_addr = min_addr = *addr;
2122	if (en_aslr && min_addr == 0 && !cluster &&
2123	    find_space != VMFS_NO_SPACE &&
2124	    (map->flags & MAP_ASLR_IGNSTART) != 0)
2125		curr_min_addr = min_addr = vm_map_min(map);
2126	try = 0;
2127	vm_map_lock(map);
2128	if (cluster) {
2129		curr_min_addr = map->anon_loc;
2130		if (curr_min_addr == 0)
2131			cluster = false;
2132	}
2133	if (find_space != VMFS_NO_SPACE) {
2134		KASSERT(find_space == VMFS_ANY_SPACE ||
2135		    find_space == VMFS_OPTIMAL_SPACE ||
2136		    find_space == VMFS_SUPER_SPACE ||
2137		    alignment != 0, ("unexpected VMFS flag"));
2138again:
2139		/*
2140		 * When creating an anonymous mapping, try clustering
2141		 * with an existing anonymous mapping first.
2142		 *
2143		 * We make up to two attempts to find address space
2144		 * for a given find_space value. The first attempt may
2145		 * apply randomization or may cluster with an existing
2146		 * anonymous mapping. If this first attempt fails,
2147		 * perform a first-fit search of the available address
2148		 * space.
2149		 *
2150		 * If both attempts fail and find_space is
2151		 * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE and
2152		 * again enable clustering and randomization.
2153		 */
2154		try++;
2155		MPASS(try <= 2);
2156
2157		if (try == 2) {
2158			/*
2159			 * Second try: we failed either to find a
2160			 * suitable region for randomizing the
2161			 * allocation, or to cluster with an existing
2162			 * mapping.  Retry with free run.
2163			 */
2164			curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
2165			    vm_map_min(map) : min_addr;
2166			atomic_add_long(&aslr_restarts, 1);
2167		}
2168
2169		if (try == 1 && en_aslr && !cluster) {
2170			/*
2171			 * Find space for allocation, including
2172			 * gap needed for later randomization.
2173			 */
2174			pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
2175			    (find_space == VMFS_SUPER_SPACE || find_space ==
2176			    VMFS_OPTIMAL_SPACE) ? 1 : 0;
2177			gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
2178			    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
2179			    aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
2180			*addr = vm_map_findspace(map, curr_min_addr,
2181			    length + gap * pagesizes[pidx]);
2182			if (*addr + length + gap * pagesizes[pidx] >
2183			    vm_map_max(map))
2184				goto again;
2185			/* And randomize the start address. */
2186			*addr += (arc4random() % gap) * pagesizes[pidx];
2187			if (max_addr != 0 && *addr + length > max_addr)
2188				goto again;
2189		} else {
2190			*addr = vm_map_findspace(map, curr_min_addr, length);
2191			if (*addr + length > vm_map_max(map) ||
2192			    (max_addr != 0 && *addr + length > max_addr)) {
2193				if (cluster) {
2194					cluster = false;
2195					MPASS(try == 1);
2196					goto again;
2197				}
2198				rv = KERN_NO_SPACE;
2199				goto done;
2200			}
2201		}
2202
2203		if (find_space != VMFS_ANY_SPACE &&
2204		    (rv = vm_map_alignspace(map, object, offset, addr, length,
2205		    max_addr, alignment)) != KERN_SUCCESS) {
2206			if (find_space == VMFS_OPTIMAL_SPACE) {
2207				find_space = VMFS_ANY_SPACE;
2208				curr_min_addr = min_addr;
2209				cluster = update_anon;
2210				try = 0;
2211				goto again;
2212			}
2213			goto done;
2214		}
2215	} else if ((cow & MAP_REMAP) != 0) {
2216		if (!vm_map_range_valid(map, *addr, *addr + length)) {
2217			rv = KERN_INVALID_ADDRESS;
2218			goto done;
2219		}
2220		rv = vm_map_delete(map, *addr, *addr + length);
2221		if (rv != KERN_SUCCESS)
2222			goto done;
2223	}
2224	if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
2225		rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
2226		    max, cow);
2227	} else {
2228		rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2229		    prot, max, cow);
2230	}
2231	if (rv == KERN_SUCCESS && update_anon)
2232		map->anon_loc = *addr + length;
2233done:
2234	vm_map_unlock(map);
2235	return (rv);
2236}
2237
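/*
 * Worked example of the randomization above, assuming 4 KB base pages: on
 * a 64-bit map with pidx == 0, gap is aslr_pages_rnd_64[0] == 0x1000, so
 * vm_map_findspace() is asked for length plus 16 MB of slack, and the
 * returned start is then displaced by a random page-aligned offset within
 * that 16 MB window.
 */
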
2238/*
2239 *	vm_map_find_min() is a variant of vm_map_find() that takes an
2240 *	additional parameter (min_addr) and treats the given address
2241 *	(*addr) differently.  Specifically, it treats *addr as a hint
2242 *	and not as the minimum address where the mapping is created.
2243 *
2244 *	This function works in two phases.  First, it tries to
2245 *	allocate above the hint.  If that fails and the hint is
2246 *	greater than min_addr, it performs a second pass, replacing
2247 *	the hint with min_addr as the minimum address for the
2248 *	allocation.
2249 */
2250int
2251vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2252    vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
2253    vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
2254    int cow)
2255{
2256	vm_offset_t hint;
2257	int rv;
2258
2259	hint = *addr;
2260	for (;;) {
2261		rv = vm_map_find(map, object, offset, addr, length, max_addr,
2262		    find_space, prot, max, cow);
2263		if (rv == KERN_SUCCESS || min_addr >= hint)
2264			return (rv);
2265		*addr = hint = min_addr;
2266	}
2267}
2268
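/*
 * Illustrative values for the two-phase search above: with *addr ==
 * 0x40000000 and min_addr == 0x10000000, the first vm_map_find() call
 * searches upward from the hint; only if it fails is the search restarted
 * from 0x10000000.
 */
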
2269/*
2270 * A map entry with any of the following flags set must not be merged with
2271 * another entry.
2272 */
2273#define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2274	    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
2275
2276static bool
2277vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2278{
2279
2280	KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2281	    (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2282	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2283	    prev, entry));
2284	return (prev->end == entry->start &&
2285	    prev->object.vm_object == entry->object.vm_object &&
2286	    (prev->object.vm_object == NULL ||
2287	    prev->offset + (prev->end - prev->start) == entry->offset) &&
2288	    prev->eflags == entry->eflags &&
2289	    prev->protection == entry->protection &&
2290	    prev->max_protection == entry->max_protection &&
2291	    prev->inheritance == entry->inheritance &&
2292	    prev->wired_count == entry->wired_count &&
2293	    prev->cred == entry->cred);
2294}
2295
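/*
 * For instance (hypothetical addresses): an entry [0x1000, 0x3000) at
 * object offset 0 merges with a neighbor [0x3000, 0x5000) only if the
 * neighbor maps the same object at offset 0x2000, so that the combined
 * entry stays contiguous in both address space and object space.
 */
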
2296static void
2297vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2298{
2299
2300	/*
2301	 * If the backing object is a vnode object, vm_object_deallocate()
2302	 * calls vrele().  However, vrele() does not lock the vnode because
2303	 * the vnode has additional references.  Thus, the map lock can be
2304	 * kept without causing a lock-order reversal with the vnode lock.
2305	 *
2306	 * Since we count the number of virtual page mappings in
2307	 * object->un_pager.vnp.writemappings, the writemappings value
2308	 * should not be adjusted when the entry is disposed of.
2309	 */
2310	if (entry->object.vm_object != NULL)
2311		vm_object_deallocate(entry->object.vm_object);
2312	if (entry->cred != NULL)
2313		crfree(entry->cred);
2314	vm_map_entry_dispose(map, entry);
2315}
2316
2317/*
2318 *	vm_map_try_merge_entries:
2319 *
2320 *	Compare the given map entry to its predecessor, and merge its
2321 *	predecessor into it if possible.  The entry remains valid, and may be
2322 *	extended.  The predecessor may be deleted.
2323 *
2324 *	The map must be locked.
2325 */
2326void
2327vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev_entry,
2328    vm_map_entry_t entry)
2329{
2330
2331	VM_MAP_ASSERT_LOCKED(map);
2332	if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
2333	    vm_map_mergeable_neighbors(prev_entry, entry)) {
2334		vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
2335		vm_map_merged_neighbor_dispose(map, prev_entry);
2336	}
2337}
2338
2339/*
2340 *	vm_map_entry_back:
2341 *
2342 *	Allocate an object to back a map entry.
2343 */
2344static inline void
2345vm_map_entry_back(vm_map_entry_t entry)
2346{
2347	vm_object_t object;
2348
2349	KASSERT(entry->object.vm_object == NULL,
2350	    ("map entry %p has backing object", entry));
2351	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2352	    ("map entry %p is a submap", entry));
2353	object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL,
2354	    entry->cred, entry->end - entry->start);
2355	entry->object.vm_object = object;
2356	entry->offset = 0;
2357	entry->cred = NULL;
2358}
2359
2360/*
2361 *	vm_map_entry_charge_object
2362 *
2363 *	If there is no object backing this entry, create one.  Otherwise, if
2364 *	the entry has cred, give it to the backing object.
2365 */
2366static inline void
2367vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry)
2368{
2369
2370	VM_MAP_ASSERT_LOCKED(map);
2371	KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
2372	    ("map entry %p is a submap", entry));
2373	if (entry->object.vm_object == NULL && !map->system_map &&
2374	    (entry->eflags & MAP_ENTRY_GUARD) == 0)
2375		vm_map_entry_back(entry);
2376	else if (entry->object.vm_object != NULL &&
2377	    ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2378	    entry->cred != NULL) {
2379		VM_OBJECT_WLOCK(entry->object.vm_object);
2380		KASSERT(entry->object.vm_object->cred == NULL,
2381		    ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
2382		entry->object.vm_object->cred = entry->cred;
2383		entry->object.vm_object->charge = entry->end - entry->start;
2384		VM_OBJECT_WUNLOCK(entry->object.vm_object);
2385		entry->cred = NULL;
2386	}
2387}
2388
2389/*
2390 *	vm_map_entry_clone
2391 *
2392 *	Create a duplicate map entry for clipping.
2393 */
2394static vm_map_entry_t
2395vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry)
2396{
2397	vm_map_entry_t new_entry;
2398
2399	VM_MAP_ASSERT_LOCKED(map);
2400
2401	/*
2402	 * Create a backing object now, if none exists, so that more individual
2403	 * objects won't be created after the map entry is split.
2404	 */
2405	vm_map_entry_charge_object(map, entry);
2406
2407	/* Clone the entry. */
2408	new_entry = vm_map_entry_create(map);
2409	*new_entry = *entry;
2410	if (new_entry->cred != NULL)
2411		crhold(entry->cred);
2412	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2413		vm_object_reference(new_entry->object.vm_object);
2414		vm_map_entry_set_vnode_text(new_entry, true);
2415		/*
2416		 * The object->un_pager.vnp.writemappings for the object of
2417		 * MAP_ENTRY_WRITECNT type entry shall be kept as is here.  The
2418		 * virtual pages are re-distributed among the clipped entries,
2419		 * so the sum is left the same.
2420		 */
2421	}
2422	return (new_entry);
2423}
2424
2425/*
2426 *	vm_map_clip_start:	[ internal use only ]
2427 *
2428 *	Asserts that the given entry begins at or after
2429 *	the specified address; if necessary,
2430 *	it splits the entry into two.
2431 */
2432static int
2433vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr)
2434{
2435	vm_map_entry_t new_entry;
2436	int bdry_idx;
2437
2438	if (!map->system_map)
2439		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2440		    "%s: map %p entry %p start 0x%jx", __func__, map, entry,
2441		    (uintmax_t)startaddr);
2442
2443	if (startaddr <= entry->start)
2444		return (KERN_SUCCESS);
2445
2446	VM_MAP_ASSERT_LOCKED(map);
2447	KASSERT(entry->end > startaddr && entry->start < startaddr,
2448	    ("%s: invalid clip of entry %p", __func__, entry));
2449
2450	bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
2451	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
2452	if (bdry_idx != 0) {
2453		if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
2454			return (KERN_INVALID_ARGUMENT);
2455	}
2456
2457	new_entry = vm_map_entry_clone(map, entry);
2458
2459	/*
2460	 * Split off the front portion.  Insert the new entry BEFORE this one,
2461	 * so that this entry has the specified starting address.
2462	 */
2463	new_entry->end = startaddr;
2464	vm_map_entry_link(map, new_entry);
2465	return (KERN_SUCCESS);
2466}
2467
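/*
 * A worked example (hypothetical addresses): clipping an entry spanning
 * [0x1000, 0x5000) at startaddr 0x3000 links a clone covering
 * [0x1000, 0x3000) before it, after which the original entry describes
 * [0x3000, 0x5000).
 */
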
2468/*
2469 *	vm_map_lookup_clip_start:
2470 *
2471 *	Find the entry at or just after 'start', and clip it if 'start' is in
2472 *	the interior of the entry.  Return that entry in '*res_entry', and in
2473 *	'*prev_entry' return the entry before 'start'.
2474 */
2475static int
2476vm_map_lookup_clip_start(vm_map_t map, vm_offset_t start,
2477    vm_map_entry_t *res_entry, vm_map_entry_t *prev_entry)
2478{
2479	vm_map_entry_t entry;
2480	int rv;
2481
2482	if (!map->system_map)
2483		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2484		    "%s: map %p start 0x%jx prev %p", __func__, map,
2485		    (uintmax_t)start, prev_entry);
2486
2487	if (vm_map_lookup_entry(map, start, prev_entry)) {
2488		entry = *prev_entry;
2489		rv = vm_map_clip_start(map, entry, start);
2490		if (rv != KERN_SUCCESS)
2491			return (rv);
2492		*prev_entry = vm_map_entry_pred(entry);
2493	} else
2494		entry = vm_map_entry_succ(*prev_entry);
2495	*res_entry = entry;
2496	return (KERN_SUCCESS);
2497}
2498
2499/*
2500 *	vm_map_clip_end:	[ internal use only ]
2501 *
2502 *	Asserts that the given entry ends at or before
2503 *	the specified address; if necessary,
2504 *	it splits the entry into two.
2505 */
2506static int
2507vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr)
2508{
2509	vm_map_entry_t new_entry;
2510	int bdry_idx;
2511
2512	if (!map->system_map)
2513		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2514		    "%s: map %p entry %p end 0x%jx", __func__, map, entry,
2515		    (uintmax_t)endaddr);
2516
2517	if (endaddr >= entry->end)
2518		return (KERN_SUCCESS);
2519
2520	VM_MAP_ASSERT_LOCKED(map);
2521	KASSERT(entry->start < endaddr && entry->end > endaddr,
2522	    ("%s: invalid clip of entry %p", __func__, entry));
2523
2524	bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
2525	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
2526	if (bdry_idx != 0) {
2527		if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
2528			return (KERN_INVALID_ARGUMENT);
2529	}
2530
2531	new_entry = vm_map_entry_clone(map, entry);
2532
2533	/*
2534	 * Split off the back portion.  Insert the new entry AFTER this one,
2535	 * so that this entry has the specified ending address.
2536	 */
2537	new_entry->start = endaddr;
2538	vm_map_entry_link(map, new_entry);
2539
2540	return (KERN_SUCCESS);
2541}
2542
2543/*
2544 *	vm_map_submap:		[ kernel use only ]
2545 *
2546 *	Mark the given range as handled by a subordinate map.
2547 *
2548 *	This range must have been created with vm_map_find,
2549 *	and no other operations may have been performed on this
2550 *	range prior to calling vm_map_submap.
2551 *
2552 *	Only a limited number of operations can be performed
2553 *	within this range after calling vm_map_submap:
2554 *		vm_fault
2555 *	[Don't try vm_map_copy!]
2556 *
2557 *	To remove a submapping, one must first remove the
2558 *	range from the superior map, and then destroy the
2559 *	submap (if desired).  [Better yet, don't try it.]
2560 */
2561int
2562vm_map_submap(
2563	vm_map_t map,
2564	vm_offset_t start,
2565	vm_offset_t end,
2566	vm_map_t submap)
2567{
2568	vm_map_entry_t entry;
2569	int result;
2570
2571	result = KERN_INVALID_ARGUMENT;
2572
2573	vm_map_lock(submap);
2574	submap->flags |= MAP_IS_SUB_MAP;
2575	vm_map_unlock(submap);
2576
2577	vm_map_lock(map);
2578	VM_MAP_RANGE_CHECK(map, start, end);
2579	if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end &&
2580	    (entry->eflags & MAP_ENTRY_COW) == 0 &&
2581	    entry->object.vm_object == NULL) {
2582		result = vm_map_clip_start(map, entry, start);
2583		if (result != KERN_SUCCESS)
2584			goto unlock;
2585		result = vm_map_clip_end(map, entry, end);
2586		if (result != KERN_SUCCESS)
2587			goto unlock;
2588		entry->object.sub_map = submap;
2589		entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2590		result = KERN_SUCCESS;
2591	}
2592unlock:
2593	vm_map_unlock(map);
2594
2595	if (result != KERN_SUCCESS) {
2596		vm_map_lock(submap);
2597		submap->flags &= ~MAP_IS_SUB_MAP;
2598		vm_map_unlock(submap);
2599	}
2600	return (result);
2601}
2602
2603/*
2604 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2605 */
2606#define	MAX_INIT_PT	96
2607
2608/*
2609 *	vm_map_pmap_enter:
2610 *
2611 *	Preload the specified map's pmap with mappings to the specified
2612 *	object's memory-resident pages.  No further physical pages are
2613 *	allocated, and no further virtual pages are retrieved from secondary
2614 *	storage.  If the specified flags include MAP_PREFAULT_PARTIAL, then a
2615 *	limited number of page mappings are created at the low-end of the
2616 *	specified address range.  (For this purpose, a superpage mapping
2617 *	counts as one page mapping.)  Otherwise, all resident pages within
2618 *	the specified address range are mapped.
2619 */
2620static void
2621vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2622    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2623{
2624	vm_offset_t start;
2625	vm_page_t p, p_start;
2626	vm_pindex_t mask, psize, threshold, tmpidx;
2627
2628	if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2629		return;
2630	if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2631		VM_OBJECT_WLOCK(object);
2632		if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2633			pmap_object_init_pt(map->pmap, addr, object, pindex,
2634			    size);
2635			VM_OBJECT_WUNLOCK(object);
2636			return;
2637		}
2638		VM_OBJECT_LOCK_DOWNGRADE(object);
2639	} else
2640		VM_OBJECT_RLOCK(object);
2641
2642	psize = atop(size);
2643	if (psize + pindex > object->size) {
2644		if (pindex >= object->size) {
2645			VM_OBJECT_RUNLOCK(object);
2646			return;
2647		}
2648		psize = object->size - pindex;
2649	}
2650
2651	start = 0;
2652	p_start = NULL;
2653	threshold = MAX_INIT_PT;
2654
2655	p = vm_page_find_least(object, pindex);
2656	/*
2657	 * Assert: the variable p is either (1) the page with the
2658	 * least pindex greater than or equal to the parameter pindex
2659	 * or (2) NULL.
2660	 */
2661	for (;
2662	     p != NULL && (tmpidx = p->pindex - pindex) < psize;
2663	     p = TAILQ_NEXT(p, listq)) {
2664		/*
2665		 * don't allow an madvise to blow away our really
2666		 * free pages allocating pv entries.
2667		 */
2668		if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2669		    vm_page_count_severe()) ||
2670		    ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2671		    tmpidx >= threshold)) {
2672			psize = tmpidx;
2673			break;
2674		}
2675		if (vm_page_all_valid(p)) {
2676			if (p_start == NULL) {
2677				start = addr + ptoa(tmpidx);
2678				p_start = p;
2679			}
2680			/* Jump ahead if a superpage mapping is possible. */
2681			if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2682			    (pagesizes[p->psind] - 1)) == 0) {
2683				mask = atop(pagesizes[p->psind]) - 1;
2684				if (tmpidx + mask < psize &&
2685				    vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2686					p += mask;
2687					threshold += mask;
2688				}
2689			}
2690		} else if (p_start != NULL) {
2691			pmap_enter_object(map->pmap, start, addr +
2692			    ptoa(tmpidx), p_start, prot);
2693			p_start = NULL;
2694		}
2695	}
2696	if (p_start != NULL)
2697		pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2698		    p_start, prot);
2699	VM_OBJECT_RUNLOCK(object);
2700}
2701
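/*
 * To illustrate the superpage jump above with concrete numbers: for
 * psind == 1 and a 2 MB superpage composed of 4 KB pages, mask is
 * atop(2 MB) - 1 == 511, so p skips ahead 511 pages and the loop's
 * TAILQ_NEXT() supplies the first page past the superpage.
 */
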
2702/*
2703 *	vm_map_protect:
2704 *
2705 *	Sets the protection and/or the maximum protection of the
2706 *	specified address region in the target map.
2707 */
2708int
2709vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2710    vm_prot_t new_prot, vm_prot_t new_maxprot, int flags)
2711{
2712	vm_map_entry_t entry, first_entry, in_tran, prev_entry;
2713	vm_object_t obj;
2714	struct ucred *cred;
2715	vm_prot_t old_prot;
2716	int rv;
2717
2718	if (start == end)
2719		return (KERN_SUCCESS);
2720
2721	if ((flags & (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT)) ==
2722	    (VM_MAP_PROTECT_SET_PROT | VM_MAP_PROTECT_SET_MAXPROT) &&
2723	    (new_prot & new_maxprot) != new_prot)
2724		return (KERN_OUT_OF_BOUNDS);
2725
2726again:
2727	in_tran = NULL;
2728	vm_map_lock(map);
2729
2730	if ((map->flags & MAP_WXORX) != 0 &&
2731	    (flags & VM_MAP_PROTECT_SET_PROT) != 0 &&
2732	    (new_prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == (VM_PROT_WRITE |
2733	    VM_PROT_EXECUTE)) {
2734		vm_map_unlock(map);
2735		return (KERN_PROTECTION_FAILURE);
2736	}
2737
2738	/*
2739	 * Ensure that we are not concurrently wiring pages.  vm_map_wire() may
2740	 * need to fault pages into the map and will drop the map lock while
2741	 * doing so, and the VM object may end up in an inconsistent state if we
2742	 * update the protection on the map entry in between faults.
2743	 */
2744	vm_map_wait_busy(map);
2745
2746	VM_MAP_RANGE_CHECK(map, start, end);
2747
2748	if (!vm_map_lookup_entry(map, start, &first_entry))
2749		first_entry = vm_map_entry_succ(first_entry);
2750
2751	/*
2752	 * Make a first pass to check for protection violations.
2753	 */
2754	for (entry = first_entry; entry->start < end;
2755	    entry = vm_map_entry_succ(entry)) {
2756		if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
2757			continue;
2758		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
2759			vm_map_unlock(map);
2760			return (KERN_INVALID_ARGUMENT);
2761		}
2762		if ((flags & VM_MAP_PROTECT_SET_PROT) == 0)
2763			new_prot = entry->protection;
2764		if ((flags & VM_MAP_PROTECT_SET_MAXPROT) == 0)
2765			new_maxprot = entry->max_protection;
2766		if ((new_prot & entry->max_protection) != new_prot ||
2767		    (new_maxprot & entry->max_protection) != new_maxprot) {
2768			vm_map_unlock(map);
2769			return (KERN_PROTECTION_FAILURE);
2770		}
2771		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2772			in_tran = entry;
2773	}
2774
2775	/*
2776	 * Postpone the operation until all in-transition map entries have
2777	 * stabilized.  An in-transition entry might already have its pages
2778	 * wired and wired_count incremented, but not yet have its
2779	 * MAP_ENTRY_USER_WIRED flag set, in which case we would fail to call
2780	 * vm_fault_copy_entry() in the final loop below.
2781	 */
2782	if (in_tran != NULL) {
2783		in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2784		vm_map_unlock_and_wait(map, 0);
2785		goto again;
2786	}
2787
2788	/*
2789	 * Before changing the protections, try to reserve swap space for any
2790	 * private (i.e., copy-on-write) mappings that are transitioning from
2791	 * read-only to read/write access.  If a reservation fails, break out
2792	 * of this loop early and let the next loop simplify the entries, since
2793	 * some may now be mergeable.
2794	 */
2795	rv = vm_map_clip_start(map, first_entry, start);
2796	if (rv != KERN_SUCCESS) {
2797		vm_map_unlock(map);
2798		return (rv);
2799	}
2800	for (entry = first_entry; entry->start < end;
2801	    entry = vm_map_entry_succ(entry)) {
2802		rv = vm_map_clip_end(map, entry, end);
2803		if (rv != KERN_SUCCESS) {
2804			vm_map_unlock(map);
2805			return (rv);
2806		}
2807
2808		if ((flags & VM_MAP_PROTECT_SET_PROT) == 0 ||
2809		    ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 ||
2810		    ENTRY_CHARGED(entry) ||
2811		    (entry->eflags & MAP_ENTRY_GUARD) != 0)
2812			continue;
2813
2814		cred = curthread->td_ucred;
2815		obj = entry->object.vm_object;
2816
2817		if (obj == NULL ||
2818		    (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) {
2819			if (!swap_reserve(entry->end - entry->start)) {
2820				rv = KERN_RESOURCE_SHORTAGE;
2821				end = entry->end;
2822				break;
2823			}
2824			crhold(cred);
2825			entry->cred = cred;
2826			continue;
2827		}
2828
2829		if (obj->type != OBJT_DEFAULT &&
2830		    (obj->flags & OBJ_SWAP) == 0)
2831			continue;
2832		VM_OBJECT_WLOCK(obj);
2833		if (obj->type != OBJT_DEFAULT &&
2834		    (obj->flags & OBJ_SWAP) == 0) {
2835			VM_OBJECT_WUNLOCK(obj);
2836			continue;
2837		}
2838
2839		/*
2840		 * Charge for the whole object allocation now, since
2841		 * we cannot distinguish between non-charged and
2842		 * charged clipped mapping of the same object later.
2843		 */
2844		KASSERT(obj->charge == 0,
2845		    ("vm_map_protect: object %p overcharged (entry %p)",
2846		    obj, entry));
2847		if (!swap_reserve(ptoa(obj->size))) {
2848			VM_OBJECT_WUNLOCK(obj);
2849			rv = KERN_RESOURCE_SHORTAGE;
2850			end = entry->end;
2851			break;
2852		}
2853
2854		crhold(cred);
2855		obj->cred = cred;
2856		obj->charge = ptoa(obj->size);
2857		VM_OBJECT_WUNLOCK(obj);
2858	}
2859
2860	/*
2861	 * If enough swap space was available, go back and fix up protections.
2862	 * Otherwise, just simplify entries, since some may have been modified.
2863	 * [Note that clipping is not necessary the second time.]
2864	 */
2865	for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry;
2866	    entry->start < end;
2867	    vm_map_try_merge_entries(map, prev_entry, entry),
2868	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
2869		if (rv != KERN_SUCCESS ||
2870		    (entry->eflags & MAP_ENTRY_GUARD) != 0)
2871			continue;
2872
2873		old_prot = entry->protection;
2874
2875		if ((flags & VM_MAP_PROTECT_SET_MAXPROT) != 0) {
2876			entry->max_protection = new_maxprot;
2877			entry->protection = new_maxprot & old_prot;
2878		}
2879		if ((flags & VM_MAP_PROTECT_SET_PROT) != 0)
2880			entry->protection = new_prot;
2881
2882		/*
2883		 * For user wired map entries, the normal lazy evaluation of
2884		 * write access upgrades through soft page faults is
2885		 * undesirable.  Instead, immediately copy any pages that are
2886		 * copy-on-write and enable write access in the physical map.
2887		 */
2888		if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2889		    (entry->protection & VM_PROT_WRITE) != 0 &&
2890		    (old_prot & VM_PROT_WRITE) == 0)
2891			vm_fault_copy_entry(map, map, entry, entry, NULL);
2892
2893		/*
2894		 * When restricting access, update the physical map.  Worry
2895		 * about copy-on-write here.
2896		 */
2897		if ((old_prot & ~entry->protection) != 0) {
2898#define MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2899							VM_PROT_ALL)
2900			pmap_protect(map->pmap, entry->start,
2901			    entry->end,
2902			    entry->protection & MASK(entry));
2903#undef	MASK
2904		}
2905	}
2906	vm_map_try_merge_entries(map, prev_entry, entry);
2907	vm_map_unlock(map);
2908	return (rv);
2909}
2910
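/*
 * Example of the W^X policy enforced above (illustrative call): on a map
 * with MAP_WXORX set,
 *
 *	vm_map_protect(map, start, end, VM_PROT_WRITE | VM_PROT_EXECUTE,
 *	    0, VM_MAP_PROTECT_SET_PROT);
 *
 * fails with KERN_PROTECTION_FAILURE before any entry is examined.
 */
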
2911/*
2912 *	vm_map_madvise:
2913 *
2914 *	This routine traverses a process's map handling the madvise
2915 *	system call.  Advisories are classified as either those affecting
2916 *	the vm_map_entry structure, or those affecting the underlying
2917 *	objects.
2918 */
2919int
2920vm_map_madvise(
2921	vm_map_t map,
2922	vm_offset_t start,
2923	vm_offset_t end,
2924	int behav)
2925{
2926	vm_map_entry_t entry, prev_entry;
2927	int rv;
2928	bool modify_map;
2929
2930	/*
2931	 * Some madvise calls directly modify the vm_map_entry, in which case
2932	 * we need to use an exclusive lock on the map and we need to perform
2933	 * various clipping operations.  Otherwise we only need a read-lock
2934	 * on the map.
2935	 */
2936	switch(behav) {
2937	case MADV_NORMAL:
2938	case MADV_SEQUENTIAL:
2939	case MADV_RANDOM:
2940	case MADV_NOSYNC:
2941	case MADV_AUTOSYNC:
2942	case MADV_NOCORE:
2943	case MADV_CORE:
2944		if (start == end)
2945			return (0);
2946		modify_map = true;
2947		vm_map_lock(map);
2948		break;
2949	case MADV_WILLNEED:
2950	case MADV_DONTNEED:
2951	case MADV_FREE:
2952		if (start == end)
2953			return (0);
2954		modify_map = false;
2955		vm_map_lock_read(map);
2956		break;
2957	default:
2958		return (EINVAL);
2959	}
2960
2961	/*
2962	 * Locate starting entry and clip if necessary.
2963	 */
2964	VM_MAP_RANGE_CHECK(map, start, end);
2965
2966	if (modify_map) {
2967		/*
2968		 * madvise behaviors that are implemented in the vm_map_entry.
2969		 *
2970		 * We clip the vm_map_entry so that behavioral changes are
2971		 * limited to the specified address range.
2972		 */
2973		rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry);
2974		if (rv != KERN_SUCCESS) {
2975			vm_map_unlock(map);
2976			return (vm_mmap_to_errno(rv));
2977		}
2978
2979		for (; entry->start < end; prev_entry = entry,
2980		    entry = vm_map_entry_succ(entry)) {
2981			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2982				continue;
2983
2984			rv = vm_map_clip_end(map, entry, end);
2985			if (rv != KERN_SUCCESS) {
2986				vm_map_unlock(map);
2987				return (vm_mmap_to_errno(rv));
2988			}
2989
2990			switch (behav) {
2991			case MADV_NORMAL:
2992				vm_map_entry_set_behavior(entry,
2993				    MAP_ENTRY_BEHAV_NORMAL);
2994				break;
2995			case MADV_SEQUENTIAL:
2996				vm_map_entry_set_behavior(entry,
2997				    MAP_ENTRY_BEHAV_SEQUENTIAL);
2998				break;
2999			case MADV_RANDOM:
3000				vm_map_entry_set_behavior(entry,
3001				    MAP_ENTRY_BEHAV_RANDOM);
3002				break;
3003			case MADV_NOSYNC:
3004				entry->eflags |= MAP_ENTRY_NOSYNC;
3005				break;
3006			case MADV_AUTOSYNC:
3007				entry->eflags &= ~MAP_ENTRY_NOSYNC;
3008				break;
3009			case MADV_NOCORE:
3010				entry->eflags |= MAP_ENTRY_NOCOREDUMP;
3011				break;
3012			case MADV_CORE:
3013				entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
3014				break;
3015			default:
3016				break;
3017			}
3018			vm_map_try_merge_entries(map, prev_entry, entry);
3019		}
3020		vm_map_try_merge_entries(map, prev_entry, entry);
3021		vm_map_unlock(map);
3022	} else {
3023		vm_pindex_t pstart, pend;
3024
3025		/*
3026		 * madvise behaviors that are implemented in the underlying
3027		 * vm_object.
3028		 *
3029		 * Since we don't clip the vm_map_entry, we have to clip
3030		 * the vm_object pindex and count.
3031		 */
3032		if (!vm_map_lookup_entry(map, start, &entry))
3033			entry = vm_map_entry_succ(entry);
3034		for (; entry->start < end;
3035		    entry = vm_map_entry_succ(entry)) {
3036			vm_offset_t useEnd, useStart;
3037
3038			if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
3039				continue;
3040
3041			/*
3042			 * MADV_FREE would otherwise rewind time to
3043			 * the creation of the shadow object.  Because
3044			 * we hold the VM map read-locked, neither the
3045			 * entry's object nor the presence of a
3046			 * backing object can change.
3047			 */
3048			if (behav == MADV_FREE &&
3049			    entry->object.vm_object != NULL &&
3050			    entry->object.vm_object->backing_object != NULL)
3051				continue;
3052
3053			pstart = OFF_TO_IDX(entry->offset);
3054			pend = pstart + atop(entry->end - entry->start);
3055			useStart = entry->start;
3056			useEnd = entry->end;
3057
3058			if (entry->start < start) {
3059				pstart += atop(start - entry->start);
3060				useStart = start;
3061			}
3062			if (entry->end > end) {
3063				pend -= atop(entry->end - end);
3064				useEnd = end;
3065			}
3066
3067			if (pstart >= pend)
3068				continue;
3069
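			/*
			 * A worked example of the clipping above, assuming
			 * 4 KB pages (hypothetical values): for an entry
			 * spanning [0x2000, 0x6000) at offset 0x1000 and an
			 * madvise range of [0x3000, 0x5000), pstart becomes 2
			 * and pend becomes 4, so only object pages [2, 4)
			 * receive the advice.
			 */
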
3070			/*
3071			 * Perform the pmap_advise() before clearing
3072			 * PGA_REFERENCED in vm_page_advise().  Otherwise, a
3073			 * concurrent pmap operation, such as pmap_remove(),
3074			 * could clear a reference in the pmap and set
3075			 * PGA_REFERENCED on the page before the pmap_advise()
3076			 * had completed.  Consequently, the page would appear
3077			 * referenced based upon an old reference that
3078			 * occurred before this pmap_advise() ran.
3079			 */
3080			if (behav == MADV_DONTNEED || behav == MADV_FREE)
3081				pmap_advise(map->pmap, useStart, useEnd,
3082				    behav);
3083
3084			vm_object_madvise(entry->object.vm_object, pstart,
3085			    pend, behav);
3086
3087			/*
3088			 * Pre-populate paging structures in the
3089			 * WILLNEED case.  For wired entries, the
3090			 * paging structures are already populated.
3091			 */
3092			if (behav == MADV_WILLNEED &&
3093			    entry->wired_count == 0) {
3094				vm_map_pmap_enter(map,
3095				    useStart,
3096				    entry->protection,
3097				    entry->object.vm_object,
3098				    pstart,
3099				    ptoa(pend - pstart),
3100				    MAP_PREFAULT_MADVISE
3101				);
3102			}
3103		}
3104		vm_map_unlock_read(map);
3105	}
3106	return (0);
3107}
3108
3109/*
3110 *	vm_map_inherit:
3111 *
3112 *	Sets the inheritance of the specified address
3113 *	range in the target map.  Inheritance
3114 *	affects how the map will be shared with
3115 *	child maps at the time of vmspace_fork.
3116 */
3117int
3118vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
3119	       vm_inherit_t new_inheritance)
3120{
3121	vm_map_entry_t entry, lentry, prev_entry, start_entry;
3122	int rv;
3123
3124	switch (new_inheritance) {
3125	case VM_INHERIT_NONE:
3126	case VM_INHERIT_COPY:
3127	case VM_INHERIT_SHARE:
3128	case VM_INHERIT_ZERO:
3129		break;
3130	default:
3131		return (KERN_INVALID_ARGUMENT);
3132	}
3133	if (start == end)
3134		return (KERN_SUCCESS);
3135	vm_map_lock(map);
3136	VM_MAP_RANGE_CHECK(map, start, end);
3137	rv = vm_map_lookup_clip_start(map, start, &start_entry, &prev_entry);
3138	if (rv != KERN_SUCCESS)
3139		goto unlock;
3140	if (vm_map_lookup_entry(map, end - 1, &lentry)) {
3141		rv = vm_map_clip_end(map, lentry, end);
3142		if (rv != KERN_SUCCESS)
3143			goto unlock;
3144	}
3145	if (new_inheritance == VM_INHERIT_COPY) {
3146		for (entry = start_entry; entry->start < end;
3147		    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3148			if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3149			    != 0) {
3150				rv = KERN_INVALID_ARGUMENT;
3151				goto unlock;
3152			}
3153		}
3154	}
3155	for (entry = start_entry; entry->start < end; prev_entry = entry,
3156	    entry = vm_map_entry_succ(entry)) {
3157		KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
3158		    entry, (uintmax_t)entry->end, (uintmax_t)end));
3159		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
3160		    new_inheritance != VM_INHERIT_ZERO)
3161			entry->inheritance = new_inheritance;
3162		vm_map_try_merge_entries(map, prev_entry, entry);
3163	}
3164	vm_map_try_merge_entries(map, prev_entry, entry);
3165unlock:
3166	vm_map_unlock(map);
3167	return (rv);
3168}
3169
3170/*
3171 *	vm_map_entry_in_transition:
3172 *
3173 *	Release the map lock, and sleep until the entry is no longer in
3174 *	transition.  Awaken and reacquire the map lock.  If the map changed
3175 *	while the lock was dropped, look up a possibly-changed entry at or
3176 *	after the 'start' position of the old entry.
3177 */
3178static vm_map_entry_t
3179vm_map_entry_in_transition(vm_map_t map, vm_offset_t in_start,
3180    vm_offset_t *io_end, bool holes_ok, vm_map_entry_t in_entry)
3181{
3182	vm_map_entry_t entry;
3183	vm_offset_t start;
3184	u_int last_timestamp;
3185
3186	VM_MAP_ASSERT_LOCKED(map);
3187	KASSERT((in_entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3188	    ("not in-transition map entry %p", in_entry));
3189	/*
3190	 * We have not yet clipped the entry.
3191	 */
3192	start = MAX(in_start, in_entry->start);
3193	in_entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3194	last_timestamp = map->timestamp;
3195	if (vm_map_unlock_and_wait(map, 0)) {
3196		/*
3197		 * Allow interruption of user wiring/unwiring?
3198		 */
3199	}
3200	vm_map_lock(map);
3201	if (last_timestamp + 1 == map->timestamp)
3202		return (in_entry);
3203
3204	/*
3205	 * Look again for the entry because the map was modified while it was
3206	 * unlocked.  Specifically, the entry may have been clipped, merged, or
3207	 * deleted.
3208	 */
3209	if (!vm_map_lookup_entry(map, start, &entry)) {
3210		if (!holes_ok) {
3211			*io_end = start;
3212			return (NULL);
3213		}
3214		entry = vm_map_entry_succ(entry);
3215	}
3216	return (entry);
3217}
3218
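/*
 * The "last_timestamp + 1 == map->timestamp" test above works because the
 * map timestamp is bumped on each write-lock acquisition: an increase of
 * exactly one means only our own relock intervened, so in_entry is still
 * valid.
 */
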
3219/*
3220 *	vm_map_unwire:
3221 *
3222 *	Implements both kernel and user unwiring.
3223 */
3224int
3225vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3226    int flags)
3227{
3228	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3229	int rv;
3230	bool holes_ok, need_wakeup, user_unwire;
3231
3232	if (start == end)
3233		return (KERN_SUCCESS);
3234	holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3235	user_unwire = (flags & VM_MAP_WIRE_USER) != 0;
3236	vm_map_lock(map);
3237	VM_MAP_RANGE_CHECK(map, start, end);
3238	if (!vm_map_lookup_entry(map, start, &first_entry)) {
3239		if (holes_ok)
3240			first_entry = vm_map_entry_succ(first_entry);
3241		else {
3242			vm_map_unlock(map);
3243			return (KERN_INVALID_ADDRESS);
3244		}
3245	}
3246	rv = KERN_SUCCESS;
3247	for (entry = first_entry; entry->start < end; entry = next_entry) {
3248		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3249			/*
3250			 * We have not yet clipped the entry.
3251			 */
3252			next_entry = vm_map_entry_in_transition(map, start,
3253			    &end, holes_ok, entry);
3254			if (next_entry == NULL) {
3255				if (entry == first_entry) {
3256					vm_map_unlock(map);
3257					return (KERN_INVALID_ADDRESS);
3258				}
3259				rv = KERN_INVALID_ADDRESS;
3260				break;
3261			}
3262			first_entry = (entry == first_entry) ?
3263			    next_entry : NULL;
3264			continue;
3265		}
3266		rv = vm_map_clip_start(map, entry, start);
3267		if (rv != KERN_SUCCESS)
3268			break;
3269		rv = vm_map_clip_end(map, entry, end);
3270		if (rv != KERN_SUCCESS)
3271			break;
3272
3273		/*
3274		 * Mark the entry in case the map lock is released.  (See
3275		 * above.)
3276		 */
3277		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3278		    entry->wiring_thread == NULL,
3279		    ("owned map entry %p", entry));
3280		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3281		entry->wiring_thread = curthread;
3282		next_entry = vm_map_entry_succ(entry);
3283		/*
3284		 * Check the map for holes in the specified region.
3285		 * If holes_ok, skip this check.
3286		 */
3287		if (!holes_ok &&
3288		    entry->end < end && next_entry->start > entry->end) {
3289			end = entry->end;
3290			rv = KERN_INVALID_ADDRESS;
3291			break;
3292		}
3293		/*
3294		 * If system unwiring, require that the entry is system wired.
3295		 */
3296		if (!user_unwire &&
3297		    vm_map_entry_system_wired_count(entry) == 0) {
3298			end = entry->end;
3299			rv = KERN_INVALID_ARGUMENT;
3300			break;
3301		}
3302	}
3303	need_wakeup = false;
3304	if (first_entry == NULL &&
3305	    !vm_map_lookup_entry(map, start, &first_entry)) {
3306		KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
3307		prev_entry = first_entry;
3308		entry = vm_map_entry_succ(first_entry);
3309	} else {
3310		prev_entry = vm_map_entry_pred(first_entry);
3311		entry = first_entry;
3312	}
3313	for (; entry->start < end;
3314	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
3315		/*
3316		 * If holes_ok was specified, an empty
3317		 * space in the unwired region could have been mapped
3318		 * while the map lock was dropped for draining
3319		 * MAP_ENTRY_IN_TRANSITION.  Moreover, another thread
3320		 * could be simultaneously wiring this new mapping
3321		 * entry.  Detect these cases and skip any entries
3322		 * not marked as in transition by us.
3323		 */
3324		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3325		    entry->wiring_thread != curthread) {
3326			KASSERT(holes_ok,
3327			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
3328			continue;
3329		}
3330
3331		if (rv == KERN_SUCCESS && (!user_unwire ||
3332		    (entry->eflags & MAP_ENTRY_USER_WIRED))) {
3333			if (entry->wired_count == 1)
3334				vm_map_entry_unwire(map, entry);
3335			else
3336				entry->wired_count--;
3337			if (user_unwire)
3338				entry->eflags &= ~MAP_ENTRY_USER_WIRED;
3339		}
3340		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3341		    ("vm_map_unwire: in-transition flag missing %p", entry));
3342		KASSERT(entry->wiring_thread == curthread,
3343		    ("vm_map_unwire: alien wire %p", entry));
3344		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3345		entry->wiring_thread = NULL;
3346		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3347			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3348			need_wakeup = true;
3349		}
3350		vm_map_try_merge_entries(map, prev_entry, entry);
3351	}
3352	vm_map_try_merge_entries(map, prev_entry, entry);
3353	vm_map_unlock(map);
3354	if (need_wakeup)
3355		vm_map_wakeup(map);
3356	return (rv);
3357}
3358
3359static void
3360vm_map_wire_user_count_sub(u_long npages)
3361{
3362
3363	atomic_subtract_long(&vm_user_wire_count, npages);
3364}
3365
3366static bool
3367vm_map_wire_user_count_add(u_long npages)
3368{
3369	u_long wired;
3370
3371	wired = vm_user_wire_count;
3372	do {
3373		if (npages + wired > vm_page_max_user_wired)
3374			return (false);
3375	} while (!atomic_fcmpset_long(&vm_user_wire_count, &wired,
3376	    npages + wired));
3377
3378	return (true);
3379}
3380
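/*
 * These two helpers implement a lock-free global limit on user-wired
 * pages.  When the vm_page_max_user_wired bound would be exceeded, the
 * caller reports KERN_RESOURCE_SHORTAGE, which typically reaches
 * userspace as ENOMEM from mlock(2).
 */
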
3381/*
3382 *	vm_map_wire_entry_failure:
3383 *
3384 *	Handle a wiring failure on the given entry.
3385 *
3386 *	The map should be locked.
3387 */
3388static void
3389vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3390    vm_offset_t failed_addr)
3391{
3392
3393	VM_MAP_ASSERT_LOCKED(map);
3394	KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3395	    entry->wired_count == 1,
3396	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3397	KASSERT(failed_addr < entry->end,
3398	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3399
3400	/*
3401	 * If any pages at the start of this entry were successfully wired,
3402	 * then unwire them.
3403	 */
3404	if (failed_addr > entry->start) {
3405		pmap_unwire(map->pmap, entry->start, failed_addr);
3406		vm_object_unwire(entry->object.vm_object, entry->offset,
3407		    failed_addr - entry->start, PQ_ACTIVE);
3408	}
3409
3410	/*
3411	 * Assign an out-of-range value to represent the failure to wire this
3412	 * entry.
3413	 */
3414	entry->wired_count = -1;
3415}
3416
3417int
3418vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3419{
3420	int rv;
3421
3422	vm_map_lock(map);
3423	rv = vm_map_wire_locked(map, start, end, flags);
3424	vm_map_unlock(map);
3425	return (rv);
3426}
3427
3428/*
3429 *	vm_map_wire_locked:
3430 *
3431 *	Implements both kernel and user wiring.  Returns with the map locked;
3432 *	the map lock may have been dropped and reacquired in the meantime.
3433 */
3434int
3435vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
3436{
3437	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
3438	vm_offset_t faddr, saved_end, saved_start;
3439	u_long incr, npages;
3440	u_int bidx, last_timestamp;
3441	int rv;
3442	bool holes_ok, need_wakeup, user_wire;
3443	vm_prot_t prot;
3444
3445	VM_MAP_ASSERT_LOCKED(map);
3446
3447	if (start == end)
3448		return (KERN_SUCCESS);
3449	prot = 0;
3450	if (flags & VM_MAP_WIRE_WRITE)
3451		prot |= VM_PROT_WRITE;
3452	holes_ok = (flags & VM_MAP_WIRE_HOLESOK) != 0;
3453	user_wire = (flags & VM_MAP_WIRE_USER) != 0;
3454	VM_MAP_RANGE_CHECK(map, start, end);
3455	if (!vm_map_lookup_entry(map, start, &first_entry)) {
3456		if (holes_ok)
3457			first_entry = vm_map_entry_succ(first_entry);
3458		else
3459			return (KERN_INVALID_ADDRESS);
3460	}
3461	for (entry = first_entry; entry->start < end; entry = next_entry) {
3462		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3463			/*
3464			 * We have not yet clipped the entry.
3465			 */
3466			next_entry = vm_map_entry_in_transition(map, start,
3467			    &end, holes_ok, entry);
3468			if (next_entry == NULL) {
3469				if (entry == first_entry)
3470					return (KERN_INVALID_ADDRESS);
3471				rv = KERN_INVALID_ADDRESS;
3472				goto done;
3473			}
3474			first_entry = (entry == first_entry) ?
3475			    next_entry : NULL;
3476			continue;
3477		}
3478		rv = vm_map_clip_start(map, entry, start);
3479		if (rv != KERN_SUCCESS)
3480			goto done;
3481		rv = vm_map_clip_end(map, entry, end);
3482		if (rv != KERN_SUCCESS)
3483			goto done;
3484
3485		/*
3486		 * Mark the entry in case the map lock is released.  (See
3487		 * above.)
3488		 */
3489		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3490		    entry->wiring_thread == NULL,
3491		    ("owned map entry %p", entry));
3492		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3493		entry->wiring_thread = curthread;
3494		if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3495		    || (entry->protection & prot) != prot) {
3496			entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3497			if (!holes_ok) {
3498				end = entry->end;
3499				rv = KERN_INVALID_ADDRESS;
3500				goto done;
3501			}
3502		} else if (entry->wired_count == 0) {
3503			entry->wired_count++;
3504
3505			npages = atop(entry->end - entry->start);
3506			if (user_wire && !vm_map_wire_user_count_add(npages)) {
3507				vm_map_wire_entry_failure(map, entry,
3508				    entry->start);
3509				end = entry->end;
3510				rv = KERN_RESOURCE_SHORTAGE;
3511				goto done;
3512			}
3513
3514			/*
3515			 * Release the map lock, relying on the in-transition
3516			 * mark.  Mark the map busy for fork.
3517			 */
3518			saved_start = entry->start;
3519			saved_end = entry->end;
3520			last_timestamp = map->timestamp;
3521			bidx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK)
3522			    >> MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
3523			incr =  pagesizes[bidx];
3524			vm_map_busy(map);
3525			vm_map_unlock(map);
3526
3527			for (faddr = saved_start; faddr < saved_end;
3528			    faddr += incr) {
3529				/*
3530				 * Simulate a fault to get the page and enter
3531				 * it into the physical map.
3532				 */
3533				rv = vm_fault(map, faddr, VM_PROT_NONE,
3534				    VM_FAULT_WIRE, NULL);
3535				if (rv != KERN_SUCCESS)
3536					break;
3537			}
3538			vm_map_lock(map);
3539			vm_map_unbusy(map);
3540			if (last_timestamp + 1 != map->timestamp) {
3541				/*
3542				 * Look again for the entry because the map was
3543				 * modified while it was unlocked.  The entry
3544				 * may have been clipped, but NOT merged or
3545				 * deleted.
3546				 */
3547				if (!vm_map_lookup_entry(map, saved_start,
3548				    &next_entry))
3549					KASSERT(false,
3550					    ("vm_map_wire: lookup failed"));
3551				first_entry = (entry == first_entry) ?
3552				    next_entry : NULL;
3553				for (entry = next_entry; entry->end < saved_end;
3554				    entry = vm_map_entry_succ(entry)) {
3555					/*
3556					 * In case of failure, handle entries
3557					 * that were not fully wired here;
3558					 * fully wired entries are handled
3559					 * later.
3560					 */
3561					if (rv != KERN_SUCCESS &&
3562					    faddr < entry->end)
3563						vm_map_wire_entry_failure(map,
3564						    entry, faddr);
3565				}
3566			}
3567			if (rv != KERN_SUCCESS) {
3568				vm_map_wire_entry_failure(map, entry, faddr);
3569				if (user_wire)
3570					vm_map_wire_user_count_sub(npages);
3571				end = entry->end;
3572				goto done;
3573			}
		} else if (!user_wire ||
		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3576			entry->wired_count++;
3577		}
3578		/*
3579		 * Check the map for holes in the specified region.
3580		 * If holes_ok was specified, skip this check.
3581		 */
3582		next_entry = vm_map_entry_succ(entry);
3583		if (!holes_ok &&
3584		    entry->end < end && next_entry->start > entry->end) {
3585			end = entry->end;
3586			rv = KERN_INVALID_ADDRESS;
3587			goto done;
3588		}
3589	}
3590	rv = KERN_SUCCESS;
3591done:
3592	need_wakeup = false;
3593	if (first_entry == NULL &&
3594	    !vm_map_lookup_entry(map, start, &first_entry)) {
3595		KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
3596		prev_entry = first_entry;
3597		entry = vm_map_entry_succ(first_entry);
3598	} else {
3599		prev_entry = vm_map_entry_pred(first_entry);
3600		entry = first_entry;
3601	}
3602	for (; entry->start < end;
3603	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
		/*
		 * If holes_ok was specified, an unmapped gap in the
		 * specified region could have been mapped while the
		 * map lock was dropped for faulting in the pages or
		 * draining MAP_ENTRY_IN_TRANSITION.  Moreover,
		 * another thread could be simultaneously wiring this
		 * new mapping entry.  Detect these cases and skip any
		 * entries marked as in transition not by us.
		 *
		 * Another way to get an entry not marked with
		 * MAP_ENTRY_IN_TRANSITION is after failed clipping,
		 * which set rv to KERN_INVALID_ARGUMENT.
		 */
3617		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3618		    entry->wiring_thread != curthread) {
3619			KASSERT(holes_ok || rv == KERN_INVALID_ARGUMENT,
3620			    ("vm_map_wire: !HOLESOK and new/changed entry"));
3621			continue;
3622		}
3623
3624		if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) {
3625			/* do nothing */
3626		} else if (rv == KERN_SUCCESS) {
3627			if (user_wire)
3628				entry->eflags |= MAP_ENTRY_USER_WIRED;
3629		} else if (entry->wired_count == -1) {
3630			/*
3631			 * Wiring failed on this entry.  Thus, unwiring is
3632			 * unnecessary.
3633			 */
3634			entry->wired_count = 0;
3635		} else if (!user_wire ||
3636		    (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3637			/*
3638			 * Undo the wiring.  Wiring succeeded on this entry
3639			 * but failed on a later entry.
3640			 */
3641			if (entry->wired_count == 1) {
3642				vm_map_entry_unwire(map, entry);
3643				if (user_wire)
3644					vm_map_wire_user_count_sub(
3645					    atop(entry->end - entry->start));
3646			} else
3647				entry->wired_count--;
3648		}
3649		KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3650		    ("vm_map_wire: in-transition flag missing %p", entry));
3651		KASSERT(entry->wiring_thread == curthread,
3652		    ("vm_map_wire: alien wire %p", entry));
3653		entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3654		    MAP_ENTRY_WIRE_SKIPPED);
3655		entry->wiring_thread = NULL;
3656		if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3657			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3658			need_wakeup = true;
3659		}
3660		vm_map_try_merge_entries(map, prev_entry, entry);
3661	}
3662	vm_map_try_merge_entries(map, prev_entry, entry);
3663	if (need_wakeup)
3664		vm_map_wakeup(map);
3665	return (rv);
3666}
3667
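/*
 * Note (illustrative summary): vm_map_wire_locked() is two-phase.  The
 * first pass marks each entry MAP_ENTRY_IN_TRANSITION and faults the
 * pages in with the map lock dropped; the pass after "done" re-walks
 * the range, commits or rolls back the wired counts, clears the
 * transition marks, and wakes up any threads that slept on
 * MAP_ENTRY_NEEDS_WAKEUP.
 */
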
3668/*
3669 * vm_map_sync
3670 *
3671 * Push any dirty cached pages in the address range to their pager.
3672 * If syncio is TRUE, dirty pages are written synchronously.
3673 * If invalidate is TRUE, any cached pages are freed as well.
3674 *
3675 * If the size of the region from start to end is zero, we are
3676 * supposed to flush all modified pages within the region containing
3677 * start.  Unfortunately, a region can be split or coalesced with
3678 * neighboring regions, making it difficult to determine what the
3679 * original region was.  Therefore, we approximate this requirement by
3680 * flushing the current region containing start.
3681 *
3682 * Returns an error if any part of the specified range is not mapped.
3683 */
3684int
3685vm_map_sync(
3686	vm_map_t map,
3687	vm_offset_t start,
3688	vm_offset_t end,
3689	boolean_t syncio,
3690	boolean_t invalidate)
3691{
3692	vm_map_entry_t entry, first_entry, next_entry;
3693	vm_size_t size;
3694	vm_object_t object;
3695	vm_ooffset_t offset;
3696	unsigned int last_timestamp;
3697	int bdry_idx;
3698	boolean_t failed;
3699
3700	vm_map_lock_read(map);
3701	VM_MAP_RANGE_CHECK(map, start, end);
3702	if (!vm_map_lookup_entry(map, start, &first_entry)) {
3703		vm_map_unlock_read(map);
3704		return (KERN_INVALID_ADDRESS);
3705	} else if (start == end) {
3706		start = first_entry->start;
3707		end = first_entry->end;
3708	}
3709
3710	/*
3711	 * Make a first pass to check for user-wired memory, holes,
3712	 * and partial invalidation of largepage mappings.
3713	 */
3714	for (entry = first_entry; entry->start < end; entry = next_entry) {
3715		if (invalidate) {
3716			if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) {
3717				vm_map_unlock_read(map);
3718				return (KERN_INVALID_ARGUMENT);
3719			}
3720			bdry_idx = (entry->eflags &
3721			    MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
3722			    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
3723			if (bdry_idx != 0 &&
3724			    ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
3725			    (end & (pagesizes[bdry_idx] - 1)) != 0)) {
3726				vm_map_unlock_read(map);
3727				return (KERN_INVALID_ARGUMENT);
3728			}
3729		}
3730		next_entry = vm_map_entry_succ(entry);
3731		if (end > entry->end &&
3732		    entry->end != next_entry->start) {
3733			vm_map_unlock_read(map);
3734			return (KERN_INVALID_ADDRESS);
3735		}
3736	}
3737
3738	if (invalidate)
3739		pmap_remove(map->pmap, start, end);
3740	failed = FALSE;
3741
3742	/*
3743	 * Make a second pass, cleaning/uncaching pages from the indicated
3744	 * objects as we go.
3745	 */
3746	for (entry = first_entry; entry->start < end;) {
3747		offset = entry->offset + (start - entry->start);
3748		size = (end <= entry->end ? end : entry->end) - start;
3749		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) {
3750			vm_map_t smap;
3751			vm_map_entry_t tentry;
3752			vm_size_t tsize;
3753
3754			smap = entry->object.sub_map;
3755			vm_map_lock_read(smap);
3756			(void) vm_map_lookup_entry(smap, offset, &tentry);
3757			tsize = tentry->end - offset;
3758			if (tsize < size)
3759				size = tsize;
3760			object = tentry->object.vm_object;
3761			offset = tentry->offset + (offset - tentry->start);
3762			vm_map_unlock_read(smap);
3763		} else {
3764			object = entry->object.vm_object;
3765		}
3766		vm_object_reference(object);
3767		last_timestamp = map->timestamp;
3768		vm_map_unlock_read(map);
3769		if (!vm_object_sync(object, offset, size, syncio, invalidate))
3770			failed = TRUE;
3771		start += size;
3772		vm_object_deallocate(object);
3773		vm_map_lock_read(map);
3774		if (last_timestamp == map->timestamp ||
3775		    !vm_map_lookup_entry(map, start, &entry))
3776			entry = vm_map_entry_succ(entry);
3777	}
3778
3779	vm_map_unlock_read(map);
3780	return (failed ? KERN_FAILURE : KERN_SUCCESS);
3781}
3782
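/*
 * Usage sketch (illustrative): an msync(2)-style caller that flushes
 * dirty pages synchronously without invalidating them might use, with
 * "map", "addr" and "len" assumed caller-supplied:
 *
 *	rv = vm_map_sync(map, addr, addr + len, TRUE, FALSE);
 *	if (rv != KERN_SUCCESS)
 *		return (vm_mmap_to_errno(rv));
 */
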
3783/*
3784 *	vm_map_entry_unwire:	[ internal use only ]
3785 *
3786 *	Make the region specified by this entry pageable.
3787 *
3788 *	The map in question should be locked.
3789 *	[This is the reason for this routine's existence.]
3790 */
3791static void
3792vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3793{
3794	vm_size_t size;
3795
3796	VM_MAP_ASSERT_LOCKED(map);
3797	KASSERT(entry->wired_count > 0,
3798	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
3799
3800	size = entry->end - entry->start;
3801	if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
3802		vm_map_wire_user_count_sub(atop(size));
3803	pmap_unwire(map->pmap, entry->start, entry->end);
3804	vm_object_unwire(entry->object.vm_object, entry->offset, size,
3805	    PQ_ACTIVE);
3806	entry->wired_count = 0;
3807}
3808
3809static void
3810vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3811{
3812
3813	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3814		vm_object_deallocate(entry->object.vm_object);
3815	uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3816}
3817
3818/*
3819 *	vm_map_entry_delete:	[ internal use only ]
3820 *
3821 *	Deallocate the given entry from the target map.
3822 */
3823static void
3824vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3825{
3826	vm_object_t object;
3827	vm_pindex_t offidxstart, offidxend, size1;
3828	vm_size_t size;
3829
3830	vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3831	object = entry->object.vm_object;
3832
3833	if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3834		MPASS(entry->cred == NULL);
3835		MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3836		MPASS(object == NULL);
3837		vm_map_entry_deallocate(entry, map->system_map);
3838		return;
3839	}
3840
3841	size = entry->end - entry->start;
3842	map->size -= size;
3843
3844	if (entry->cred != NULL) {
3845		swap_release_by_cred(size, entry->cred);
3846		crfree(entry->cred);
3847	}
3848
3849	if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) {
3850		entry->object.vm_object = NULL;
3851	} else if ((object->flags & OBJ_ANON) != 0 ||
3852	    object == kernel_object) {
3853		KASSERT(entry->cred == NULL || object->cred == NULL ||
3854		    (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3855		    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3856		offidxstart = OFF_TO_IDX(entry->offset);
3857		offidxend = offidxstart + atop(size);
3858		VM_OBJECT_WLOCK(object);
3859		if (object->ref_count != 1 &&
3860		    ((object->flags & OBJ_ONEMAPPING) != 0 ||
3861		    object == kernel_object)) {
3862			vm_object_collapse(object);
3863
3864			/*
3865			 * The option OBJPR_NOTMAPPED can be passed here
3866			 * because vm_map_delete() already performed
3867			 * pmap_remove() on the only mapping to this range
3868			 * of pages.
3869			 */
3870			vm_object_page_remove(object, offidxstart, offidxend,
3871			    OBJPR_NOTMAPPED);
3872			if (offidxend >= object->size &&
3873			    offidxstart < object->size) {
3874				size1 = object->size;
3875				object->size = offidxstart;
3876				if (object->cred != NULL) {
3877					size1 -= object->size;
3878					KASSERT(object->charge >= ptoa(size1),
3879					    ("object %p charge < 0", object));
3880					swap_release_by_cred(ptoa(size1),
3881					    object->cred);
3882					object->charge -= ptoa(size1);
3883				}
3884			}
3885		}
3886		VM_OBJECT_WUNLOCK(object);
3887	}
3888	if (map->system_map)
3889		vm_map_entry_deallocate(entry, TRUE);
3890	else {
3891		entry->defer_next = curthread->td_map_def_user;
3892		curthread->td_map_def_user = entry;
3893	}
3894}
3895
3896/*
3897 *	vm_map_delete:	[ internal use only ]
3898 *
3899 *	Deallocates the given address range from the target
3900 *	map.
3901 */
3902int
3903vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3904{
3905	vm_map_entry_t entry, next_entry, scratch_entry;
3906	int rv;
3907
3908	VM_MAP_ASSERT_LOCKED(map);
3909
3910	if (start == end)
3911		return (KERN_SUCCESS);
3912
3913	/*
3914	 * Find the start of the region, and clip it.
3915	 * Step through all entries in this region.
3916	 */
3917	rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry);
3918	if (rv != KERN_SUCCESS)
3919		return (rv);
3920	for (; entry->start < end; entry = next_entry) {
3921		/*
3922		 * Wait for wiring or unwiring of an entry to complete.
3923		 * Also wait for any system wirings to disappear on
3924		 * user maps.
3925		 */
3926		if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3927		    (vm_map_pmap(map) != kernel_pmap &&
3928		    vm_map_entry_system_wired_count(entry) != 0)) {
3929			unsigned int last_timestamp;
3930			vm_offset_t saved_start;
3931
3932			saved_start = entry->start;
3933			entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3934			last_timestamp = map->timestamp;
3935			(void) vm_map_unlock_and_wait(map, 0);
3936			vm_map_lock(map);
3937			if (last_timestamp + 1 != map->timestamp) {
3938				/*
3939				 * Look again for the entry because the map was
3940				 * modified while it was unlocked.
3941				 * Specifically, the entry may have been
3942				 * clipped, merged, or deleted.
3943				 */
3944				rv = vm_map_lookup_clip_start(map, saved_start,
3945				    &next_entry, &scratch_entry);
3946				if (rv != KERN_SUCCESS)
3947					break;
3948			} else
3949				next_entry = entry;
3950			continue;
3951		}
3952
3953		/* XXXKIB or delete to the upper superpage boundary ? */
3954		rv = vm_map_clip_end(map, entry, end);
3955		if (rv != KERN_SUCCESS)
3956			break;
3957		next_entry = vm_map_entry_succ(entry);
3958
3959		/*
3960		 * Unwire before removing addresses from the pmap; otherwise,
3961		 * unwiring will put the entries back in the pmap.
3962		 */
3963		if (entry->wired_count != 0)
3964			vm_map_entry_unwire(map, entry);
3965
3966		/*
3967		 * Remove mappings for the pages, but only if the
3968		 * mappings could exist.  For instance, it does not
3969		 * make sense to call pmap_remove() for guard entries.
3970		 */
3971		if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3972		    entry->object.vm_object != NULL)
3973			pmap_remove(map->pmap, entry->start, entry->end);
3974
3975		if (entry->end == map->anon_loc)
3976			map->anon_loc = entry->start;
3977
3978		/*
3979		 * Delete the entry only after removing all pmap
3980		 * entries pointing to its pages.  (Otherwise, its
3981		 * page frames may be reallocated, and any modify bits
3982		 * will be set in the wrong object!)
3983		 */
3984		vm_map_entry_delete(map, entry);
3985	}
3986	return (rv);
3987}
3988
3989/*
3990 *	vm_map_remove:
3991 *
3992 *	Remove the given address range from the target map.
3993 *	This is the exported form of vm_map_delete.
3994 */
3995int
3996vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3997{
3998	int result;
3999
4000	vm_map_lock(map);
4001	VM_MAP_RANGE_CHECK(map, start, end);
4002	result = vm_map_delete(map, start, end);
4003	vm_map_unlock(map);
4004	return (result);
4005}
4006
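/*
 * Usage sketch (illustrative): a munmap(2)-style caller removes a
 * page-aligned range in a single call; locking and the range check
 * happen inside vm_map_remove():
 *
 *	rv = vm_map_remove(map, trunc_page(addr),
 *	    round_page(addr + len));
 */
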
4007/*
4008 *	vm_map_check_protection:
4009 *
4010 *	Assert that the target map allows the specified privilege on the
4011 *	entire address region given.  The entire region must be allocated.
4012 *
4013 *	WARNING!  This code does not and should not check whether the
 *	contents of the region are accessible.  For example a smaller file
4015 *	might be mapped into a larger address space.
4016 *
4017 *	NOTE!  This code is also called by munmap().
4018 *
4019 *	The map must be locked.  A read lock is sufficient.
4020 */
4021boolean_t
4022vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
4023			vm_prot_t protection)
4024{
4025	vm_map_entry_t entry;
4026	vm_map_entry_t tmp_entry;
4027
4028	if (!vm_map_lookup_entry(map, start, &tmp_entry))
4029		return (FALSE);
4030	entry = tmp_entry;
4031
4032	while (start < end) {
4033		/*
4034		 * No holes allowed!
4035		 */
4036		if (start < entry->start)
4037			return (FALSE);
4038		/*
4039		 * Check protection associated with entry.
4040		 */
4041		if ((entry->protection & protection) != protection)
4042			return (FALSE);
4043		/* go to next entry */
4044		start = entry->end;
4045		entry = vm_map_entry_succ(entry);
4046	}
4047	return (TRUE);
4048}
4049
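/*
 * Usage sketch (illustrative): a read lock must be held across the
 * check, since entries may otherwise be clipped or deleted under the
 * caller:
 *
 *	vm_map_lock_read(map);
 *	ok = vm_map_check_protection(map, start, end, VM_PROT_READ);
 *	vm_map_unlock_read(map);
 */
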
/*
4052 *	vm_map_copy_swap_object:
4053 *
4054 *	Copies a swap-backed object from an existing map entry to a
4055 *	new one.  Carries forward the swap charge.  May change the
4056 *	src object on return.
4057 */
4058static void
4059vm_map_copy_swap_object(vm_map_entry_t src_entry, vm_map_entry_t dst_entry,
4060    vm_offset_t size, vm_ooffset_t *fork_charge)
4061{
4062	vm_object_t src_object;
4063	struct ucred *cred;
4064	int charged;
4065
4066	src_object = src_entry->object.vm_object;
4067	charged = ENTRY_CHARGED(src_entry);
4068	if ((src_object->flags & OBJ_ANON) != 0) {
4069		VM_OBJECT_WLOCK(src_object);
4070		vm_object_collapse(src_object);
4071		if ((src_object->flags & OBJ_ONEMAPPING) != 0) {
4072			vm_object_split(src_entry);
4073			src_object = src_entry->object.vm_object;
4074		}
4075		vm_object_reference_locked(src_object);
4076		vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
4077		VM_OBJECT_WUNLOCK(src_object);
4078	} else
4079		vm_object_reference(src_object);
4080	if (src_entry->cred != NULL &&
4081	    !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4082		KASSERT(src_object->cred == NULL,
		    ("OVERCOMMIT: vm_map_copy_swap_object: cred %p",
4084		     src_object));
4085		src_object->cred = src_entry->cred;
4086		src_object->charge = size;
4087	}
4088	dst_entry->object.vm_object = src_object;
4089	if (charged) {
4090		cred = curthread->td_ucred;
4091		crhold(cred);
4092		dst_entry->cred = cred;
4093		*fork_charge += size;
4094		if (!(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
4095			crhold(cred);
4096			src_entry->cred = cred;
4097			*fork_charge += size;
4098		}
4099	}
4100}
4101
4102/*
4103 *	vm_map_copy_entry:
4104 *
4105 *	Copies the contents of the source entry to the destination
4106 *	entry.  The entries *must* be aligned properly.
4107 */
4108static void
4109vm_map_copy_entry(
4110	vm_map_t src_map,
4111	vm_map_t dst_map,
4112	vm_map_entry_t src_entry,
4113	vm_map_entry_t dst_entry,
4114	vm_ooffset_t *fork_charge)
4115{
4116	vm_object_t src_object;
4117	vm_map_entry_t fake_entry;
4118	vm_offset_t size;
4119
4120	VM_MAP_ASSERT_LOCKED(dst_map);
4121
	if ((dst_entry->eflags | src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
4123		return;
4124
4125	if (src_entry->wired_count == 0 ||
4126	    (src_entry->protection & VM_PROT_WRITE) == 0) {
4127		/*
4128		 * If the source entry is marked needs_copy, it is already
4129		 * write-protected.
4130		 */
4131		if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
4132		    (src_entry->protection & VM_PROT_WRITE) != 0) {
4133			pmap_protect(src_map->pmap,
4134			    src_entry->start,
4135			    src_entry->end,
4136			    src_entry->protection & ~VM_PROT_WRITE);
4137		}
4138
4139		/*
4140		 * Make a copy of the object.
4141		 */
4142		size = src_entry->end - src_entry->start;
4143		if ((src_object = src_entry->object.vm_object) != NULL) {
4144			if (src_object->type == OBJT_DEFAULT ||
4145			    (src_object->flags & OBJ_SWAP) != 0) {
4146				vm_map_copy_swap_object(src_entry, dst_entry,
4147				    size, fork_charge);
4148				/* May have split/collapsed, reload obj. */
4149				src_object = src_entry->object.vm_object;
4150			} else {
4151				vm_object_reference(src_object);
4152				dst_entry->object.vm_object = src_object;
4153			}
4154			src_entry->eflags |= MAP_ENTRY_COW |
4155			    MAP_ENTRY_NEEDS_COPY;
4156			dst_entry->eflags |= MAP_ENTRY_COW |
4157			    MAP_ENTRY_NEEDS_COPY;
4158			dst_entry->offset = src_entry->offset;
4159			if (src_entry->eflags & MAP_ENTRY_WRITECNT) {
				/*
				 * MAP_ENTRY_WRITECNT cannot
				 * indicate a write reference from
				 * src_entry, since the entry is
				 * marked as needs copy.  Allocate a
				 * fake entry that is used to
				 * decrement the object's
				 * un_pager.vnp.writemappings at the
				 * appropriate time.  Attach
				 * fake_entry to the deferred list.
				 */
4170				fake_entry = vm_map_entry_create(dst_map);
4171				fake_entry->eflags = MAP_ENTRY_WRITECNT;
4172				src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
4173				vm_object_reference(src_object);
4174				fake_entry->object.vm_object = src_object;
4175				fake_entry->start = src_entry->start;
4176				fake_entry->end = src_entry->end;
4177				fake_entry->defer_next =
4178				    curthread->td_map_def_user;
4179				curthread->td_map_def_user = fake_entry;
4180			}
4181
4182			pmap_copy(dst_map->pmap, src_map->pmap,
4183			    dst_entry->start, dst_entry->end - dst_entry->start,
4184			    src_entry->start);
4185		} else {
4186			dst_entry->object.vm_object = NULL;
4187			dst_entry->offset = 0;
4188			if (src_entry->cred != NULL) {
4189				dst_entry->cred = curthread->td_ucred;
4190				crhold(dst_entry->cred);
4191				*fork_charge += size;
4192			}
4193		}
4194	} else {
4195		/*
4196		 * We don't want to make writeable wired pages copy-on-write.
4197		 * Immediately copy these pages into the new map by simulating
4198		 * page faults.  The new pages are pageable.
4199		 */
4200		vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
4201		    fork_charge);
4202	}
4203}
4204
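/*
 * Note (illustrative): on the non-wired path above, when the source
 * entry has a backing object, both entries leave this function marked
 * MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY and share that object.
 * Because the source range was write-protected first, pmap_copy() can
 * only propagate read-only mappings, so the first write fault in
 * either map shadows the shared object.
 */
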
4205/*
4206 * vmspace_map_entry_forked:
4207 * Update the newly-forked vmspace each time a map entry is inherited
4208 * or copied.  The values for vm_dsize and vm_tsize are approximate
4209 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
4210 */
4211static void
4212vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
4213    vm_map_entry_t entry)
4214{
4215	vm_size_t entrysize;
4216	vm_offset_t newend;
4217
4218	if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
4219		return;
4220	entrysize = entry->end - entry->start;
4221	vm2->vm_map.size += entrysize;
4222	if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
4223		vm2->vm_ssize += btoc(entrysize);
4224	} else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
4225	    entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
4226		newend = MIN(entry->end,
4227		    (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
4228		vm2->vm_dsize += btoc(newend - entry->start);
4229	} else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
4230	    entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
4231		newend = MIN(entry->end,
4232		    (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
4233		vm2->vm_tsize += btoc(newend - entry->start);
4234	}
4235}
4236
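/*
 * Example (illustrative, 4 KB pages): an inherited entry covering the
 * first 64 KB of vm1's data segment adds btoc(65536) == 16 pages to
 * vm2->vm_dsize, while an entry starting past
 * vm_daddr + ctob(vm_dsize) leaves the counters untouched.
 */
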
4237/*
4238 * vmspace_fork:
4239 * Create a new process vmspace structure and vm_map
4240 * based on those of an existing process.  The new map
4241 * is based on the old map, according to the inheritance
4242 * values on the regions in that map.
4243 *
4244 * XXX It might be worth coalescing the entries added to the new vmspace.
4245 *
4246 * The source map must not be locked.
4247 */
4248struct vmspace *
4249vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
4250{
4251	struct vmspace *vm2;
4252	vm_map_t new_map, old_map;
4253	vm_map_entry_t new_entry, old_entry;
4254	vm_object_t object;
4255	int error, locked;
4256	vm_inherit_t inh;
4257
4258	old_map = &vm1->vm_map;
4259	/* Copy immutable fields of vm1 to vm2. */
4260	vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
4261	    pmap_pinit);
4262	if (vm2 == NULL)
4263		return (NULL);
4264
4265	vm2->vm_taddr = vm1->vm_taddr;
4266	vm2->vm_daddr = vm1->vm_daddr;
4267	vm2->vm_maxsaddr = vm1->vm_maxsaddr;
4268	vm_map_lock(old_map);
4269	if (old_map->busy)
4270		vm_map_wait_busy(old_map);
4271	new_map = &vm2->vm_map;
4272	locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
4273	KASSERT(locked, ("vmspace_fork: lock failed"));
4274
4275	error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
4276	if (error != 0) {
4277		sx_xunlock(&old_map->lock);
4278		sx_xunlock(&new_map->lock);
4279		vm_map_process_deferred();
4280		vmspace_free(vm2);
4281		return (NULL);
4282	}
4283
4284	new_map->anon_loc = old_map->anon_loc;
4285	new_map->flags |= old_map->flags & (MAP_ASLR | MAP_ASLR_IGNSTART |
4286	    MAP_WXORX);
4287
4288	VM_MAP_ENTRY_FOREACH(old_entry, old_map) {
4289		if ((old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
			panic("vmspace_fork: encountered a submap");
4291
4292		inh = old_entry->inheritance;
4293		if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4294		    inh != VM_INHERIT_NONE)
4295			inh = VM_INHERIT_COPY;
4296
4297		switch (inh) {
4298		case VM_INHERIT_NONE:
4299			break;
4300
4301		case VM_INHERIT_SHARE:
4302			/*
4303			 * Clone the entry, creating the shared object if
4304			 * necessary.
4305			 */
4306			object = old_entry->object.vm_object;
4307			if (object == NULL) {
4308				vm_map_entry_back(old_entry);
4309				object = old_entry->object.vm_object;
4310			}
4311
4312			/*
4313			 * Add the reference before calling vm_object_shadow
			 * to ensure that a shadow object is created.
4315			 */
4316			vm_object_reference(object);
4317			if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4318				vm_object_shadow(&old_entry->object.vm_object,
4319				    &old_entry->offset,
4320				    old_entry->end - old_entry->start,
4321				    old_entry->cred,
4322				    /* Transfer the second reference too. */
4323				    true);
4324				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4325				old_entry->cred = NULL;
4326
4327				/*
4328				 * As in vm_map_merged_neighbor_dispose(),
4329				 * the vnode lock will not be acquired in
4330				 * this call to vm_object_deallocate().
4331				 */
4332				vm_object_deallocate(object);
4333				object = old_entry->object.vm_object;
4334			} else {
4335				VM_OBJECT_WLOCK(object);
4336				vm_object_clear_flag(object, OBJ_ONEMAPPING);
4337				if (old_entry->cred != NULL) {
4338					KASSERT(object->cred == NULL,
4339					    ("vmspace_fork both cred"));
4340					object->cred = old_entry->cred;
4341					object->charge = old_entry->end -
4342					    old_entry->start;
4343					old_entry->cred = NULL;
4344				}
4345
				/*
				 * Assert the correct state of the vnode
				 * v_writecount while the object is locked,
				 * so that it does not have to be relocked
				 * later just for the assertion.
				 */
4352				if (old_entry->eflags & MAP_ENTRY_WRITECNT &&
4353				    object->type == OBJT_VNODE) {
4354					KASSERT(((struct vnode *)object->
4355					    handle)->v_writecount > 0,
4356					    ("vmspace_fork: v_writecount %p",
4357					    object));
4358					KASSERT(object->un_pager.vnp.
4359					    writemappings > 0,
4360					    ("vmspace_fork: vnp.writecount %p",
4361					    object));
4362				}
4363				VM_OBJECT_WUNLOCK(object);
4364			}
4365
4366			/*
4367			 * Clone the entry, referencing the shared object.
4368			 */
4369			new_entry = vm_map_entry_create(new_map);
4370			*new_entry = *old_entry;
4371			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4372			    MAP_ENTRY_IN_TRANSITION);
4373			new_entry->wiring_thread = NULL;
4374			new_entry->wired_count = 0;
4375			if (new_entry->eflags & MAP_ENTRY_WRITECNT) {
4376				vm_pager_update_writecount(object,
4377				    new_entry->start, new_entry->end);
4378			}
4379			vm_map_entry_set_vnode_text(new_entry, true);
4380
4381			/*
4382			 * Insert the entry into the new map -- we know we're
4383			 * inserting at the end of the new map.
4384			 */
4385			vm_map_entry_link(new_map, new_entry);
4386			vmspace_map_entry_forked(vm1, vm2, new_entry);
4387
4388			/*
4389			 * Update the physical map
4390			 */
4391			pmap_copy(new_map->pmap, old_map->pmap,
4392			    new_entry->start,
4393			    (old_entry->end - old_entry->start),
4394			    old_entry->start);
4395			break;
4396
4397		case VM_INHERIT_COPY:
4398			/*
4399			 * Clone the entry and link into the map.
4400			 */
4401			new_entry = vm_map_entry_create(new_map);
4402			*new_entry = *old_entry;
4403			/*
4404			 * Copied entry is COW over the old object.
4405			 */
4406			new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4407			    MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_WRITECNT);
4408			new_entry->wiring_thread = NULL;
4409			new_entry->wired_count = 0;
4410			new_entry->object.vm_object = NULL;
4411			new_entry->cred = NULL;
4412			vm_map_entry_link(new_map, new_entry);
4413			vmspace_map_entry_forked(vm1, vm2, new_entry);
4414			vm_map_copy_entry(old_map, new_map, old_entry,
4415			    new_entry, fork_charge);
4416			vm_map_entry_set_vnode_text(new_entry, true);
4417			break;
4418
4419		case VM_INHERIT_ZERO:
4420			/*
			 * Create a new anonymous mapping entry modelled on
4422			 * the old one.
4423			 */
4424			new_entry = vm_map_entry_create(new_map);
4425			memset(new_entry, 0, sizeof(*new_entry));
4426
4427			new_entry->start = old_entry->start;
4428			new_entry->end = old_entry->end;
4429			new_entry->eflags = old_entry->eflags &
4430			    ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4431			    MAP_ENTRY_WRITECNT | MAP_ENTRY_VN_EXEC |
4432			    MAP_ENTRY_SPLIT_BOUNDARY_MASK);
4433			new_entry->protection = old_entry->protection;
4434			new_entry->max_protection = old_entry->max_protection;
4435			new_entry->inheritance = VM_INHERIT_ZERO;
4436
4437			vm_map_entry_link(new_map, new_entry);
4438			vmspace_map_entry_forked(vm1, vm2, new_entry);
4439
4440			new_entry->cred = curthread->td_ucred;
4441			crhold(new_entry->cred);
4442			*fork_charge += (new_entry->end - new_entry->start);
4443
4444			break;
4445		}
4446	}
4447	/*
4448	 * Use inlined vm_map_unlock() to postpone handling the deferred
4449	 * map entries, which cannot be done until both old_map and
4450	 * new_map locks are released.
4451	 */
4452	sx_xunlock(&old_map->lock);
4453	sx_xunlock(&new_map->lock);
4454	vm_map_process_deferred();
4455
4456	return (vm2);
4457}
4458
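/*
 * Usage sketch (illustrative), modelled on the fork path: the caller
 * reserves the accumulated copy-on-write charge against the child's
 * credentials once the fork succeeds:
 *
 *	fork_charge = 0;
 *	vm2 = vmspace_fork(p1->p_vmspace, &fork_charge);
 *	if (vm2 == NULL)
 *		return (ENOMEM);
 *	if (!swap_reserve_by_cred(fork_charge, p2->p_ucred)) {
 *		vmspace_free(vm2);
 *		return (ENOMEM);
 *	}
 */
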
4459/*
4460 * Create a process's stack for exec_new_vmspace().  This function is never
4461 * asked to wire the newly created stack.
4462 */
4463int
4464vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4465    vm_prot_t prot, vm_prot_t max, int cow)
4466{
4467	vm_size_t growsize, init_ssize;
4468	rlim_t vmemlim;
4469	int rv;
4470
4471	MPASS((map->flags & MAP_WIREFUTURE) == 0);
4472	growsize = sgrowsiz;
4473	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
4474	vm_map_lock(map);
4475	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4476	/* If we would blow our VMEM resource limit, no go */
4477	if (map->size + init_ssize > vmemlim) {
4478		rv = KERN_NO_SPACE;
4479		goto out;
4480	}
4481	rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4482	    max, cow);
4483out:
4484	vm_map_unlock(map);
4485	return (rv);
4486}
4487
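/*
 * Usage sketch (illustrative): exec-time creation of a main stack
 * that grows down from an assumed top-of-stack "usrstack" with
 * maximum size "ssiz":
 *
 *	rv = vm_map_stack(map, usrstack - ssiz, ssiz,
 *	    VM_PROT_ALL, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
 */
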
4488static int stack_guard_page = 1;
4489SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
4490    &stack_guard_page, 0,
4491    "Specifies the number of guard pages for a stack that grows");
4492
4493static int
4494vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4495    vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
4496{
4497	vm_map_entry_t new_entry, prev_entry;
4498	vm_offset_t bot, gap_bot, gap_top, top;
4499	vm_size_t init_ssize, sgp;
4500	int orient, rv;
4501
4502	/*
4503	 * The stack orientation is piggybacked with the cow argument.
4504	 * Extract it into orient and mask the cow argument so that we
4505	 * don't pass it around further.
4506	 */
4507	orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4508	KASSERT(orient != 0, ("No stack grow direction"));
4509	KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
4510	    ("bi-dir stack"));
4511
4512	if (max_ssize == 0 ||
4513	    !vm_map_range_valid(map, addrbos, addrbos + max_ssize))
4514		return (KERN_INVALID_ADDRESS);
4515	sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4516	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4517	    (vm_size_t)stack_guard_page * PAGE_SIZE;
4518	if (sgp >= max_ssize)
4519		return (KERN_INVALID_ARGUMENT);
4520
4521	init_ssize = growsize;
4522	if (max_ssize < init_ssize + sgp)
4523		init_ssize = max_ssize - sgp;
4524
4525	/* If addr is already mapped, no go */
4526	if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4527		return (KERN_NO_SPACE);
4528
4529	/*
4530	 * If we can't accommodate max_ssize in the current mapping, no go.
4531	 */
4532	if (vm_map_entry_succ(prev_entry)->start < addrbos + max_ssize)
4533		return (KERN_NO_SPACE);
4534
4535	/*
4536	 * We initially map a stack of only init_ssize.  We will grow as
4537	 * needed later.  Depending on the orientation of the stack (i.e.
4538	 * the grow direction) we either map at the top of the range, the
4539	 * bottom of the range or in the middle.
4540	 *
4541	 * Note: we would normally expect prot and max to be VM_PROT_ALL,
4542	 * and cow to be 0.  Possibly we should eliminate these as input
4543	 * parameters, and just pass these values here in the insert call.
4544	 */
4545	if (orient == MAP_STACK_GROWS_DOWN) {
4546		bot = addrbos + max_ssize - init_ssize;
4547		top = bot + init_ssize;
4548		gap_bot = addrbos;
4549		gap_top = bot;
4550	} else /* if (orient == MAP_STACK_GROWS_UP) */ {
4551		bot = addrbos;
4552		top = bot + init_ssize;
4553		gap_bot = top;
4554		gap_top = addrbos + max_ssize;
4555	}
4556	rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
4557	if (rv != KERN_SUCCESS)
4558		return (rv);
4559	new_entry = vm_map_entry_succ(prev_entry);
4560	KASSERT(new_entry->end == top || new_entry->start == bot,
4561	    ("Bad entry start/end for new stack entry"));
4562	KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4563	    (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4564	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4565	KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4566	    (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4567	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
4568	if (gap_bot == gap_top)
4569		return (KERN_SUCCESS);
4570	rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4571	    VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
4572	    MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
4573	if (rv == KERN_SUCCESS) {
4574		/*
4575		 * Gap can never successfully handle a fault, so
4576		 * read-ahead logic is never used for it.  Re-use
4577		 * next_read of the gap entry to store
4578		 * stack_guard_page for vm_map_growstack().
4579		 */
4580		if (orient == MAP_STACK_GROWS_DOWN)
4581			vm_map_entry_pred(new_entry)->next_read = sgp;
4582		else
4583			vm_map_entry_succ(new_entry)->next_read = sgp;
4584	} else {
4585		(void)vm_map_delete(map, bot, top);
4586	}
4587	return (rv);
4588}
4589
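/*
 * Illustrative layout for MAP_STACK_GROWS_DOWN, using the variables
 * above with bot = addrbos + max_ssize - init_ssize:
 *
 *	addrbos                     bot                  top
 *	|<--------- gap ----------->|<--- init_ssize --->|
 *	|<-------------------- max_ssize --------------->|
 *
 * The gap entry can never be faulted on; vm_map_growstack() consumes
 * it from the top as the stack entry grows toward addrbos.
 */
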
4590/*
4591 * Attempts to grow a vm stack entry.  Returns KERN_SUCCESS if we
4592 * successfully grow the stack.
4593 */
4594static int
4595vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4596{
4597	vm_map_entry_t stack_entry;
4598	struct proc *p;
4599	struct vmspace *vm;
4600	struct ucred *cred;
4601	vm_offset_t gap_end, gap_start, grow_start;
4602	vm_size_t grow_amount, guard, max_grow;
4603	rlim_t lmemlim, stacklim, vmemlim;
4604	int rv, rv1;
4605	bool gap_deleted, grow_down, is_procstack;
4606#ifdef notyet
4607	uint64_t limit;
4608#endif
4609#ifdef RACCT
4610	int error;
4611#endif
4612
4613	p = curproc;
4614	vm = p->p_vmspace;
4615
	/*
	 * Disallow stack growth when the access is performed by a
	 * debugger or AIO daemon.  The reason is that the wrong
	 * process's resource limits would be applied.
	 */
4621	if (p != initproc && (map != &p->p_vmspace->vm_map ||
4622	    p->p_textvp == NULL))
4623		return (KERN_FAILURE);
4624
4625	MPASS(!map->system_map);
4626
4627	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4628	stacklim = lim_cur(curthread, RLIMIT_STACK);
4629	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4630retry:
4631	/* If addr is not in a hole for a stack grow area, no need to grow. */
4632	if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4633		return (KERN_FAILURE);
4634	if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4635		return (KERN_SUCCESS);
4636	if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
4637		stack_entry = vm_map_entry_succ(gap_entry);
4638		if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4639		    stack_entry->start != gap_entry->end)
4640			return (KERN_FAILURE);
4641		grow_amount = round_page(stack_entry->start - addr);
4642		grow_down = true;
4643	} else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
4644		stack_entry = vm_map_entry_pred(gap_entry);
4645		if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
4646		    stack_entry->end != gap_entry->start)
4647			return (KERN_FAILURE);
4648		grow_amount = round_page(addr + 1 - stack_entry->end);
4649		grow_down = false;
4650	} else {
4651		return (KERN_FAILURE);
4652	}
4653	guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
4654	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
4655	    gap_entry->next_read;
4656	max_grow = gap_entry->end - gap_entry->start;
4657	if (guard > max_grow)
4658		return (KERN_NO_SPACE);
4659	max_grow -= guard;
4660	if (grow_amount > max_grow)
4661		return (KERN_NO_SPACE);
4662
4663	/*
4664	 * If this is the main process stack, see if we're over the stack
4665	 * limit.
4666	 */
4667	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4668	    addr < (vm_offset_t)p->p_sysent->sv_usrstack;
4669	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4670		return (KERN_NO_SPACE);
4671
4672#ifdef RACCT
4673	if (racct_enable) {
4674		PROC_LOCK(p);
4675		if (is_procstack && racct_set(p, RACCT_STACK,
4676		    ctob(vm->vm_ssize) + grow_amount)) {
4677			PROC_UNLOCK(p);
4678			return (KERN_NO_SPACE);
4679		}
4680		PROC_UNLOCK(p);
4681	}
4682#endif
4683
4684	grow_amount = roundup(grow_amount, sgrowsiz);
4685	if (grow_amount > max_grow)
4686		grow_amount = max_grow;
4687	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4688		grow_amount = trunc_page((vm_size_t)stacklim) -
4689		    ctob(vm->vm_ssize);
4690	}
4691
4692#ifdef notyet
4693	PROC_LOCK(p);
4694	limit = racct_get_available(p, RACCT_STACK);
4695	PROC_UNLOCK(p);
4696	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4697		grow_amount = limit - ctob(vm->vm_ssize);
4698#endif
4699
4700	if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4701		if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4702			rv = KERN_NO_SPACE;
4703			goto out;
4704		}
4705#ifdef RACCT
4706		if (racct_enable) {
4707			PROC_LOCK(p);
4708			if (racct_set(p, RACCT_MEMLOCK,
4709			    ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4710				PROC_UNLOCK(p);
4711				rv = KERN_NO_SPACE;
4712				goto out;
4713			}
4714			PROC_UNLOCK(p);
4715		}
4716#endif
4717	}
4718
4719	/* If we would blow our VMEM resource limit, no go */
4720	if (map->size + grow_amount > vmemlim) {
4721		rv = KERN_NO_SPACE;
4722		goto out;
4723	}
4724#ifdef RACCT
4725	if (racct_enable) {
4726		PROC_LOCK(p);
4727		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4728			PROC_UNLOCK(p);
4729			rv = KERN_NO_SPACE;
4730			goto out;
4731		}
4732		PROC_UNLOCK(p);
4733	}
4734#endif
4735
4736	if (vm_map_lock_upgrade(map)) {
4737		gap_entry = NULL;
4738		vm_map_lock_read(map);
4739		goto retry;
4740	}
4741
4742	if (grow_down) {
4743		grow_start = gap_entry->end - grow_amount;
4744		if (gap_entry->start + grow_amount == gap_entry->end) {
4745			gap_start = gap_entry->start;
4746			gap_end = gap_entry->end;
4747			vm_map_entry_delete(map, gap_entry);
4748			gap_deleted = true;
4749		} else {
4750			MPASS(gap_entry->start < gap_entry->end - grow_amount);
4751			vm_map_entry_resize(map, gap_entry, -grow_amount);
4752			gap_deleted = false;
4753		}
4754		rv = vm_map_insert(map, NULL, 0, grow_start,
4755		    grow_start + grow_amount,
4756		    stack_entry->protection, stack_entry->max_protection,
4757		    MAP_STACK_GROWS_DOWN);
4758		if (rv != KERN_SUCCESS) {
4759			if (gap_deleted) {
4760				rv1 = vm_map_insert(map, NULL, 0, gap_start,
4761				    gap_end, VM_PROT_NONE, VM_PROT_NONE,
4762				    MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
4763				MPASS(rv1 == KERN_SUCCESS);
4764			} else
4765				vm_map_entry_resize(map, gap_entry,
4766				    grow_amount);
4767		}
4768	} else {
4769		grow_start = stack_entry->end;
4770		cred = stack_entry->cred;
4771		if (cred == NULL && stack_entry->object.vm_object != NULL)
4772			cred = stack_entry->object.vm_object->cred;
4773		if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4774			rv = KERN_NO_SPACE;
4775		/* Grow the underlying object if applicable. */
4776		else if (stack_entry->object.vm_object == NULL ||
4777		    vm_object_coalesce(stack_entry->object.vm_object,
4778		    stack_entry->offset,
4779		    (vm_size_t)(stack_entry->end - stack_entry->start),
4780		    grow_amount, cred != NULL)) {
4781			if (gap_entry->start + grow_amount == gap_entry->end) {
4782				vm_map_entry_delete(map, gap_entry);
4783				vm_map_entry_resize(map, stack_entry,
4784				    grow_amount);
4785			} else {
4786				gap_entry->start += grow_amount;
4787				stack_entry->end += grow_amount;
4788			}
4789			map->size += grow_amount;
4790			rv = KERN_SUCCESS;
4791		} else
4792			rv = KERN_FAILURE;
4793	}
4794	if (rv == KERN_SUCCESS && is_procstack)
4795		vm->vm_ssize += btoc(grow_amount);
4796
4797	/*
4798	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
4799	 */
4800	if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4801		rv = vm_map_wire_locked(map, grow_start,
4802		    grow_start + grow_amount,
4803		    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4804	}
4805	vm_map_lock_downgrade(map);
4806
4807out:
4808#ifdef RACCT
4809	if (racct_enable && rv != KERN_SUCCESS) {
4810		PROC_LOCK(p);
4811		error = racct_set(p, RACCT_VMEM, map->size);
4812		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4813		if (!old_mlock) {
4814			error = racct_set(p, RACCT_MEMLOCK,
4815			    ptoa(pmap_wired_count(map->pmap)));
4816			KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4817		}
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4819		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4820		PROC_UNLOCK(p);
4821	}
4822#endif
4823
4824	return (rv);
4825}
4826
/*
 * Unshare the specified VM space for exec.  If other processes share
 * it, then create a new one.  The new vmspace contains no mappings.
 */
4831int
4832vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4833{
4834	struct vmspace *oldvmspace = p->p_vmspace;
4835	struct vmspace *newvmspace;
4836
4837	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4838	    ("vmspace_exec recursed"));
4839	newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4840	if (newvmspace == NULL)
4841		return (ENOMEM);
4842	newvmspace->vm_swrss = oldvmspace->vm_swrss;
	/*
	 * This code is written like this for prototype purposes.  The
	 * goal is to avoid running down the vmspace here, but to let
	 * the other processes that are still using the vmspace run it
	 * down when they are finished with it.  Even though there is
	 * little or no chance of blocking here, it is a good idea to
	 * keep this form for future mods.
	 */
4850	PROC_VMSPACE_LOCK(p);
4851	p->p_vmspace = newvmspace;
4852	PROC_VMSPACE_UNLOCK(p);
4853	if (p == curthread->td_proc)
4854		pmap_activate(curthread);
4855	curthread->td_pflags |= TDP_EXECVMSPC;
4856	return (0);
4857}
4858
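/*
 * Usage note (sketch): after a successful vmspace_exec(), the exec
 * path is expected to clear TDP_EXECVMSPC and free the old vmspace
 * once the thread can no longer fault on it, e.g.:
 *
 *	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
 *		td->td_pflags &= ~TDP_EXECVMSPC;
 *		vmspace_free(oldvmspace);
 *	}
 */
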
4859/*
4860 * Unshare the specified VM space for forcing COW.  This
4861 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4862 */
4863int
4864vmspace_unshare(struct proc *p)
4865{
4866	struct vmspace *oldvmspace = p->p_vmspace;
4867	struct vmspace *newvmspace;
4868	vm_ooffset_t fork_charge;
4869
4870	/*
4871	 * The caller is responsible for ensuring that the reference count
4872	 * cannot concurrently transition 1 -> 2.
4873	 */
4874	if (refcount_load(&oldvmspace->vm_refcnt) == 1)
4875		return (0);
4876	fork_charge = 0;
4877	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4878	if (newvmspace == NULL)
4879		return (ENOMEM);
4880	if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4881		vmspace_free(newvmspace);
4882		return (ENOMEM);
4883	}
4884	PROC_VMSPACE_LOCK(p);
4885	p->p_vmspace = newvmspace;
4886	PROC_VMSPACE_UNLOCK(p);
4887	if (p == curthread->td_proc)
4888		pmap_activate(curthread);
4889	vmspace_free(oldvmspace);
4890	return (0);
4891}
4892
4893/*
4894 *	vm_map_lookup:
4895 *
4896 *	Finds the VM object, offset, and
4897 *	protection for a given virtual address in the
4898 *	specified map, assuming a page fault of the
4899 *	type specified.
4900 *
4901 *	Leaves the map in question locked for read; return
4902 *	values are guaranteed until a vm_map_lookup_done
4903 *	call is performed.  Note that the map argument
4904 *	is in/out; the returned map must be used in
4905 *	the call to vm_map_lookup_done.
4906 *
4907 *	A handle (out_entry) is returned for use in
4908 *	vm_map_lookup_done, to make that fast.
4909 *
4910 *	If a lookup is requested with "write protection"
4911 *	specified, the map may be changed to perform virtual
4912 *	copying operations, although the data referenced will
4913 *	remain the same.
4914 */
4915int
4916vm_map_lookup(vm_map_t *var_map,		/* IN/OUT */
4917	      vm_offset_t vaddr,
4918	      vm_prot_t fault_typea,
4919	      vm_map_entry_t *out_entry,	/* OUT */
4920	      vm_object_t *object,		/* OUT */
4921	      vm_pindex_t *pindex,		/* OUT */
4922	      vm_prot_t *out_prot,		/* OUT */
4923	      boolean_t *wired)			/* OUT */
4924{
4925	vm_map_entry_t entry;
4926	vm_map_t map = *var_map;
4927	vm_prot_t prot;
4928	vm_prot_t fault_type;
4929	vm_object_t eobject;
4930	vm_size_t size;
4931	struct ucred *cred;
4932
4933RetryLookup:
4934
4935	vm_map_lock_read(map);
4936
4937RetryLookupLocked:
4938	/*
4939	 * Lookup the faulting address.
4940	 */
4941	if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4942		vm_map_unlock_read(map);
4943		return (KERN_INVALID_ADDRESS);
4944	}
4945
4946	entry = *out_entry;
4947
4948	/*
4949	 * Handle submaps.
4950	 */
4951	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4952		vm_map_t old_map = map;
4953
4954		*var_map = map = entry->object.sub_map;
4955		vm_map_unlock_read(old_map);
4956		goto RetryLookup;
4957	}
4958
4959	/*
4960	 * Check whether this task is allowed to have this page.
4961	 */
4962	prot = entry->protection;
4963	if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4964		fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4965		if (prot == VM_PROT_NONE && map != kernel_map &&
4966		    (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4967		    (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4968		    MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4969		    vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4970			goto RetryLookupLocked;
4971	}
4972	fault_type = fault_typea & VM_PROT_ALL;
4973	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4974		vm_map_unlock_read(map);
4975		return (KERN_PROTECTION_FAILURE);
4976	}
4977	KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4978	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4979	    (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4980	    ("entry %p flags %x", entry, entry->eflags));
4981	if ((fault_typea & VM_PROT_COPY) != 0 &&
4982	    (entry->max_protection & VM_PROT_WRITE) == 0 &&
4983	    (entry->eflags & MAP_ENTRY_COW) == 0) {
4984		vm_map_unlock_read(map);
4985		return (KERN_PROTECTION_FAILURE);
4986	}
4987
4988	/*
4989	 * If this page is not pageable, we have to get it for all possible
4990	 * accesses.
4991	 */
4992	*wired = (entry->wired_count != 0);
4993	if (*wired)
4994		fault_type = entry->protection;
4995	size = entry->end - entry->start;
4996
	/*
	 * If the entry was copy-on-write, we either shadow the backing
	 * object now or demote the effective permissions, depending on
	 * the fault type.
	 */
5000	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5001		/*
5002		 * If we want to write the page, we may as well handle that
5003		 * now since we've got the map locked.
5004		 *
5005		 * If we don't need to write the page, we just demote the
5006		 * permissions allowed.
5007		 */
5008		if ((fault_type & VM_PROT_WRITE) != 0 ||
5009		    (fault_typea & VM_PROT_COPY) != 0) {
5010			/*
5011			 * Make a new object, and place it in the object
5012			 * chain.  Note that no new references have appeared
5013			 * -- one just moved from the map to the new
5014			 * object.
5015			 */
5016			if (vm_map_lock_upgrade(map))
5017				goto RetryLookup;
5018
5019			if (entry->cred == NULL) {
				/*
				 * The current thread, e.g., a
				 * debugger forcing a copy, is
				 * charged for the memory.
				 */
5024				cred = curthread->td_ucred;
5025				crhold(cred);
5026				if (!swap_reserve_by_cred(size, cred)) {
5027					crfree(cred);
5028					vm_map_unlock(map);
5029					return (KERN_RESOURCE_SHORTAGE);
5030				}
5031				entry->cred = cred;
5032			}
5033			eobject = entry->object.vm_object;
5034			vm_object_shadow(&entry->object.vm_object,
5035			    &entry->offset, size, entry->cred, false);
5036			if (eobject == entry->object.vm_object) {
5037				/*
5038				 * The object was not shadowed.
5039				 */
5040				swap_release_by_cred(size, entry->cred);
5041				crfree(entry->cred);
5042			}
5043			entry->cred = NULL;
5044			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
5045
5046			vm_map_lock_downgrade(map);
5047		} else {
5048			/*
5049			 * We're attempting to read a copy-on-write page --
5050			 * don't allow writes.
5051			 */
5052			prot &= ~VM_PROT_WRITE;
5053		}
5054	}
5055
5056	/*
5057	 * Create an object if necessary.
5058	 */
5059	if (entry->object.vm_object == NULL && !map->system_map) {
5060		if (vm_map_lock_upgrade(map))
5061			goto RetryLookup;
5062		entry->object.vm_object = vm_object_allocate_anon(atop(size),
5063		    NULL, entry->cred, entry->cred != NULL ? size : 0);
5064		entry->offset = 0;
5065		entry->cred = NULL;
5066		vm_map_lock_downgrade(map);
5067	}
5068
5069	/*
5070	 * Return the object/offset from this entry.  If the entry was
5071	 * copy-on-write or empty, it has been fixed up.
5072	 */
5073	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5074	*object = entry->object.vm_object;
5075
5076	*out_prot = prot;
5077	return (KERN_SUCCESS);
5078}
5079
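/*
 * Usage sketch (illustrative), following the page-fault pattern: the
 * returned object and pindex remain valid until vm_map_lookup_done()
 * releases the read lock that vm_map_lookup() leaves held:
 *
 *	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry,
 *	    &object, &pindex, &prot, &wired);
 *	if (rv != KERN_SUCCESS)
 *		return (rv);
 *	... use object and pindex ...
 *	vm_map_lookup_done(map, entry);
 */
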
5080/*
5081 *	vm_map_lookup_locked:
5082 *
5083 *	Lookup the faulting address.  A version of vm_map_lookup that returns
 *	KERN_FAILURE instead of blocking on map lock or memory allocation.
5085 */
5086int
5087vm_map_lookup_locked(vm_map_t *var_map,		/* IN/OUT */
5088		     vm_offset_t vaddr,
5089		     vm_prot_t fault_typea,
5090		     vm_map_entry_t *out_entry,	/* OUT */
5091		     vm_object_t *object,	/* OUT */
5092		     vm_pindex_t *pindex,	/* OUT */
5093		     vm_prot_t *out_prot,	/* OUT */
5094		     boolean_t *wired)		/* OUT */
5095{
5096	vm_map_entry_t entry;
5097	vm_map_t map = *var_map;
5098	vm_prot_t prot;
5099	vm_prot_t fault_type = fault_typea;
5100
5101	/*
5102	 * Lookup the faulting address.
5103	 */
5104	if (!vm_map_lookup_entry(map, vaddr, out_entry))
5105		return (KERN_INVALID_ADDRESS);
5106
5107	entry = *out_entry;
5108
5109	/*
5110	 * Fail if the entry refers to a submap.
5111	 */
5112	if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
5113		return (KERN_FAILURE);
5114
5115	/*
5116	 * Check whether this task is allowed to have this page.
5117	 */
5118	prot = entry->protection;
5119	fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
5120	if ((fault_type & prot) != fault_type)
5121		return (KERN_PROTECTION_FAILURE);
5122
5123	/*
5124	 * If this page is not pageable, we have to get it for all possible
5125	 * accesses.
5126	 */
5127	*wired = (entry->wired_count != 0);
5128	if (*wired)
5129		fault_type = entry->protection;
5130
5131	if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
5132		/*
5133		 * Fail if the entry was copy-on-write for a write fault.
5134		 */
5135		if (fault_type & VM_PROT_WRITE)
5136			return (KERN_FAILURE);
5137		/*
5138		 * We're attempting to read a copy-on-write page --
5139		 * don't allow writes.
5140		 */
5141		prot &= ~VM_PROT_WRITE;
5142	}
5143
5144	/*
5145	 * Fail if an object should be created.
5146	 */
5147	if (entry->object.vm_object == NULL && !map->system_map)
5148		return (KERN_FAILURE);
5149
5150	/*
5151	 * Return the object/offset from this entry.  If the entry was
5152	 * copy-on-write or empty, it has been fixed up.
5153	 */
5154	*pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
5155	*object = entry->object.vm_object;
5156
5157	*out_prot = prot;
5158	return (KERN_SUCCESS);
5159}
5160
5161/*
5162 *	vm_map_lookup_done:
5163 *
5164 *	Releases locks acquired by a vm_map_lookup
5165 *	(according to the handle returned by that lookup).
5166 */
5167void
5168vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
5169{
5170	/*
5171	 * Unlock the main-level map
5172	 */
5173	vm_map_unlock_read(map);
5174}
5175
5176vm_offset_t
5177vm_map_max_KBI(const struct vm_map *map)
5178{
5179
5180	return (vm_map_max(map));
5181}
5182
5183vm_offset_t
5184vm_map_min_KBI(const struct vm_map *map)
5185{
5186
5187	return (vm_map_min(map));
5188}
5189
5190pmap_t
5191vm_map_pmap_KBI(vm_map_t map)
5192{
5193
5194	return (map->pmap);
5195}
5196
5197bool
5198vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end)
5199{
5200
5201	return (vm_map_range_valid(map, start, end));
5202}
5203
5204#ifdef INVARIANTS
5205static void
5206_vm_map_assert_consistent(vm_map_t map, int check)
5207{
5208	vm_map_entry_t entry, prev;
5209	vm_map_entry_t cur, header, lbound, ubound;
5210	vm_size_t max_left, max_right;
5211
5212#ifdef DIAGNOSTIC
5213	++map->nupdates;
5214#endif
5215	if (enable_vmmap_check != check)
5216		return;
5217
5218	header = prev = &map->header;
5219	VM_MAP_ENTRY_FOREACH(entry, map) {
5220		KASSERT(prev->end <= entry->start,
5221		    ("map %p prev->end = %jx, start = %jx", map,
5222		    (uintmax_t)prev->end, (uintmax_t)entry->start));
5223		KASSERT(entry->start < entry->end,
5224		    ("map %p start = %jx, end = %jx", map,
5225		    (uintmax_t)entry->start, (uintmax_t)entry->end));
5226		KASSERT(entry->left == header ||
5227		    entry->left->start < entry->start,
5228		    ("map %p left->start = %jx, start = %jx", map,
5229		    (uintmax_t)entry->left->start, (uintmax_t)entry->start));
5230		KASSERT(entry->right == header ||
5231		    entry->start < entry->right->start,
5232		    ("map %p start = %jx, right->start = %jx", map,
5233		    (uintmax_t)entry->start, (uintmax_t)entry->right->start));
5234		cur = map->root;
5235		lbound = ubound = header;
5236		for (;;) {
5237			if (entry->start < cur->start) {
5238				ubound = cur;
5239				cur = cur->left;
5240				KASSERT(cur != lbound,
5241				    ("map %p cannot find %jx",
5242				    map, (uintmax_t)entry->start));
5243			} else if (cur->end <= entry->start) {
5244				lbound = cur;
5245				cur = cur->right;
5246				KASSERT(cur != ubound,
5247				    ("map %p cannot find %jx",
5248				    map, (uintmax_t)entry->start));
5249			} else {
5250				KASSERT(cur == entry,
5251				    ("map %p cannot find %jx",
5252				    map, (uintmax_t)entry->start));
5253				break;
5254			}
5255		}
5256		max_left = vm_map_entry_max_free_left(entry, lbound);
5257		max_right = vm_map_entry_max_free_right(entry, ubound);
5258		KASSERT(entry->max_free == vm_size_max(max_left, max_right),
5259		    ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
5260		    (uintmax_t)entry->max_free,
5261		    (uintmax_t)max_left, (uintmax_t)max_right));
5262		prev = entry;
5263	}
5264	KASSERT(prev->end <= entry->start,
5265	    ("map %p prev->end = %jx, start = %jx", map,
5266	    (uintmax_t)prev->end, (uintmax_t)entry->start));
5267}
5268#endif
5269
5270#include "opt_ddb.h"
5271#ifdef DDB
5272#include <sys/kernel.h>
5273
5274#include <ddb/ddb.h>
5275
5276static void
5277vm_map_print(vm_map_t map)
5278{
5279	vm_map_entry_t entry, prev;
5280
5281	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
5282	    (void *)map,
5283	    (void *)map->pmap, map->nentries, map->timestamp);
5284
5285	db_indent += 2;
5286	prev = &map->header;
5287	VM_MAP_ENTRY_FOREACH(entry, map) {
		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x\n",
5289		    (void *)entry, (void *)entry->start, (void *)entry->end,
5290		    entry->eflags);
5291		{
5292			static const char * const inheritance_name[4] =
5293			{"share", "copy", "none", "donate_copy"};
5294
5295			db_iprintf(" prot=%x/%x/%s",
5296			    entry->protection,
5297			    entry->max_protection,
5298			    inheritance_name[(int)(unsigned char)
5299			    entry->inheritance]);
5300			if (entry->wired_count != 0)
5301				db_printf(", wired");
5302		}
5303		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
5304			db_printf(", share=%p, offset=0x%jx\n",
5305			    (void *)entry->object.sub_map,
5306			    (uintmax_t)entry->offset);
5307			if (prev == &map->header ||
5308			    prev->object.sub_map !=
5309				entry->object.sub_map) {
5310				db_indent += 2;
5311				vm_map_print((vm_map_t)entry->object.sub_map);
5312				db_indent -= 2;
5313			}
5314		} else {
5315			if (entry->cred != NULL)
5316				db_printf(", ruid %d", entry->cred->cr_ruid);
5317			db_printf(", object=%p, offset=0x%jx",
5318			    (void *)entry->object.vm_object,
5319			    (uintmax_t)entry->offset);
5320			if (entry->object.vm_object && entry->object.vm_object->cred)
5321				db_printf(", obj ruid %d charge %jx",
5322				    entry->object.vm_object->cred->cr_ruid,
5323				    (uintmax_t)entry->object.vm_object->charge);
5324			if (entry->eflags & MAP_ENTRY_COW)
5325				db_printf(", copy (%s)",
5326				    (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
5327			db_printf("\n");
5328
5329			if (prev == &map->header ||
5330			    prev->object.vm_object !=
5331				entry->object.vm_object) {
5332				db_indent += 2;
5333				vm_object_print((db_expr_t)(intptr_t)
5334						entry->object.vm_object,
5335						0, 0, (char *)0);
5336				db_indent -= 2;
5337			}
5338		}
5339		prev = entry;
5340	}
5341	db_indent -= 2;
5342}
5343
5344DB_SHOW_COMMAND(map, map)
5345{
5346
5347	if (!have_addr) {
5348		db_printf("usage: show map <addr>\n");
5349		return;
5350	}
5351	vm_map_print((vm_map_t)addr);
5352}
5353
5354DB_SHOW_COMMAND(procvm, procvm)
5355{
5356	struct proc *p;
5357
5358	if (have_addr) {
5359		p = db_lookup_proc(addr);
5360	} else {
5361		p = curproc;
5362	}
5363
5364	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
5365	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
5366	    (void *)vmspace_pmap(p->p_vmspace));
5367
5368	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
5369}
5370
5371#endif /* DDB */
5372