/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 266588 2014-05-23 16:22:36Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map;
vm_map_t exec_map;
vm_map_t pipe_map;

const void *zero_region;
CTASSERT((ZERO_REGION_SIZE & PAGE_MASK) == 0);

SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    NULL, VM_MIN_KERNEL_ADDRESS, "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__) || defined(__sparc64__)
    &vm_max_kernel_address, 0,
#else
    NULL, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");

/*
 *	kva_alloc:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kva_alloc(size)
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (vmem_alloc(kernel_arena, size, M_BESTFIT | M_NOWAIT, &addr))
		return (0);

	return (addr);
}

/*
 *	kva_free:
 *
 *	Release a region of kernel virtual address space allocated
 *	with kva_alloc.  No physical pages are associated with such a
 *	region, so none are freed here.
 *
 *	This routine may not block.
 */
void
kva_free(addr, size)
	vm_offset_t addr;
	vm_size_t size;
{

	size = round_page(size);
	vmem_free(kernel_arena, addr, size);
}
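
/*
 * Illustrative sketch only: a typical consumer pairs kva_alloc() with
 * explicit pmap_qenter()/pmap_qremove() calls, since the range has no
 * backing VM object.  The page array "pages" and count "npages" below
 * are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(npages * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 *	pmap_qenter(va, pages, npages);
 *	... use the mapping at va ...
 *	pmap_qremove(va, npages);
 *	kva_free(va, npages * PAGE_SIZE);
 */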

/*
 *	Allocates a region from the kernel address map and physical pages
 *	within the specified address range to the kernel object.  Creates a
 *	wired mapping from this region to these pages, and returns the
 *	region's starting virtual address.  The allocated pages are not
 *	necessarily physically contiguous.  If M_ZERO is specified through the
 *	given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr;
	vm_ooffset_t offset;
	vm_page_t m;
	int pflags, tries;
	int i;

	size = round_page(size);
	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		tries = 0;
retry:
		m = vm_page_alloc_contig(object, OFF_TO_IDX(offset + i),
		    pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
				vm_pageout_grow_cache(tries, low, high);
				VM_OBJECT_WLOCK(object);
				tries++;
				goto retry;
			}
			/*
			 * Unmap and free the pages.
			 */
			if (i != 0)
				pmap_remove(kernel_pmap, addr, addr + i);
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, 0);
				vm_page_free(m);
			}
			vmem_free(vmem, addr, size);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
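
/*
 * Illustrative sketch only: a driver that needs wired, not necessarily
 * contiguous kernel memory with every backing page below 4GB and an
 * uncacheable attribute (where the platform defines one) might use
 * kmem_alloc_attr() as follows; "bufsize" is hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(kernel_arena, bufsize, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, VM_MEMATTR_UNCACHEABLE);
 *	...
 *	kmem_free(kernel_arena, va, bufsize);
 */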

/*
 *	Allocates a region from the kernel address map and physically
 *	contiguous pages within the specified address range to the kernel
 *	object.  Creates a wired mapping from this region to these pages, and
 *	returns the region's starting virtual address.  If M_ZERO is specified
 *	through the given flags, then the pages are zeroed before they are
 *	mapped.
 */
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vm_object_t object = vmem == kmem_arena ? kmem_object : kernel_object;
	vm_offset_t addr, tmp;
	vm_ooffset_t offset;
	vm_page_t end_m, m;
	int pflags, tries;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	tries = 0;
retry:
	m = vm_page_alloc_contig(object, OFF_TO_IDX(offset), pflags,
	    atop(size), low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
			vm_pageout_grow_cache(tries, low, high);
			VM_OBJECT_WLOCK(object);
			tries++;
			goto retry;
		}
		vmem_free(vmem, addr, size);
		return (0);
	}
	end_m = m + atop(size);
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	return (addr);
}
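
/*
 * Illustrative sketch only: kmem_alloc_contig() additionally guarantees
 * physically contiguous backing pages and honors alignment and boundary
 * constraints, e.g. a hypothetical DMA descriptor ring that must be
 * contiguous, page aligned, and below 4GB ("ring_size" is hypothetical).
 *
 *	vm_offset_t ring;
 *
 *	ring = kmem_alloc_contig(kernel_arena, ring_size, M_WAITOK | M_ZERO,
 *	    0, 0xffffffff, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *	...
 *	kmem_free(kernel_arena, ring, ring_size);
 */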

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min is superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
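
/*
 * Illustrative sketch only: submaps such as the exec_map and pipe_map
 * declared above are carved out of kernel_map in this way during system
 * startup; "submap" and "submap_size" are hypothetical.
 *
 *	vm_map_t submap;
 *	vm_offset_t minaddr, maxaddr;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    submap_size, FALSE);
 */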

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down pages in the kernel's address space.
 */
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
	vm_offset_t addr;
	int rv;

	size = round_page(size);
	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(vmem, addr, size);
		return (0);
	}
	return (addr);
}
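
/*
 * Illustrative sketch only: kmem_malloc() is primarily a backend for UMA
 * and malloc(9); most code should use those interfaces instead.  A direct
 * caller pairs it with kmem_free() on the same arena ("len" is
 * hypothetical).
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_arena, len, M_NOWAIT | M_ZERO);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kmem_arena, va, len);
 */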

/*
 *	kmem_back:
 *
 *	Allocate physical pages for the specified virtual address range.
 */
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m;
	int pflags;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_back: only supports kernel objects."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;

	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_WUNLOCK(object);
				VM_WAIT;
				VM_OBJECT_WLOCK(object);
				goto retry;
			}
			/*
			 * Unmap and free the pages.
			 */
			if (i != 0)
				pmap_remove(kernel_pmap, addr, addr + i);
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(object,
				    OFF_TO_IDX(offset + i));
				vm_page_unwire(m, 0);
				vm_page_free(m);
			}
			VM_OBJECT_WUNLOCK(object);
			return (KERN_NO_SPACE);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_back: page %p is managed", m));
		m->valid = VM_PAGE_BITS_ALL;
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}

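/*
 *	kmem_unback:
 *
 *	Unmap and free the physical pages underlying the specified virtual
 *	address range.  A page must have been allocated at each page index
 *	in the range, as is the case for ranges populated by kmem_back().
 */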
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	vm_page_t m;
	vm_offset_t offset;
	int i;

	KASSERT(object == kmem_object || object == kernel_object,
	    ("kmem_unback: only supports kernel objects."));

	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(object, OFF_TO_IDX(offset + i));
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);
}
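
/*
 * Illustrative sketch only: kmem_back() and kmem_unback() let a caller
 * attach and detach backing pages on an address range it already owns,
 * mirroring what kmem_malloc() and kmem_free() do internally.  Assuming a
 * range from the kernel arena ("len" is hypothetical):
 *
 *	vm_offset_t va;
 *
 *	va = kva_alloc(len);
 *	if (va == 0)
 *		return (ENOMEM);
 *	if (kmem_back(kernel_object, va, len, M_NOWAIT) != KERN_SUCCESS) {
 *		kva_free(va, len);
 *		return (ENOMEM);
 *	}
 *	... use the wired mapping at va ...
 *	kmem_unback(kernel_object, va, len);
 *	kva_free(va, len);
 */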

/*
 *	kmem_free:
 *
 *	Free memory allocated with kmem_malloc.  The size must match the
 *	original allocation.
 */
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{

	size = round_page(size);
	kmem_unback((vmem == kmem_arena) ? kmem_object : kernel_object,
	    addr, size);
	vmem_free(vmem, addr, size);
}

/*
 *	kmap_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmap_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmap_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmap_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
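
/*
 * Illustrative sketch only: this pair provides pageable, swap-backed
 * allocations from a kernel submap (such as those created by
 * kmem_suballoc() above), blocking until space is available; "submap" and
 * "bufsize" are hypothetical.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmap_alloc_wait(submap, bufsize);
 *	if (kva == 0)
 *		return (ENOMEM);	/* request can never be satisfied */
 *	...
 *	kmap_free_wakeup(submap, kva, bufsize);
 */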

void
kmem_init_zero_region(void)
{
	vm_offset_t addr, i;
	vm_page_t m;

	/*
	 * Map a single physical page of zeros to a larger virtual range.
	 * This requires less looping in places that want large amounts of
	 * zeros, while not using much more physical resources.
	 */
	addr = kva_alloc(ZERO_REGION_SIZE);
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
	pmap_protect(kernel_pmap, addr, addr + ZERO_REGION_SIZE, VM_PROT_READ);

	zero_region = (const void *)addr;
}
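
/*
 * Illustrative sketch only: zero_region provides up to ZERO_REGION_SIZE
 * bytes of read-only zeros, so callers can source zeros without zeroing a
 * private buffer, e.g. copying zeros out in chunks ("resid" and "uio" are
 * hypothetical).
 *
 *	while (resid > 0) {
 *		len = MIN(resid, ZERO_REGION_SIZE);
 *		error = uiomove(__DECONST(void *, zero_region), len, uio);
 *		if (error != 0)
 *			break;
 *		resid -= len;
 *	}
 */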

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
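
/*
 * On a kernel built with DIAGNOSTIC, the vm_lowmem event can be triggered
 * from userland for testing, e.g.:
 *
 *	sysctl debug.vm_lowmem=1
 */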
#endif