vm_kern.c revision 120050
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 120050 2003-09-14 02:37:59Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

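/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a hypothetical caller obtaining pageable kernel VA from kernel_map and
 * releasing it with kmem_free().  The function name and size are made up.
 */
#if 0	/* example only */
static int
example_pageable(void)
{
	vm_offset_t va;

	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
	if (va == 0)
		return (1);	/* no space in the map */
	/* ... use the range; backing pages are faulted in on demand ... */
	kmem_free(kernel_map, va, 4 * PAGE_SIZE);
	return (0);
}
#endif
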
/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

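/*
 * Illustrative sketch (editor's addition, not in the original file): KVA
 * obtained from kmem_alloc_nofault() has no backing object, so the caller
 * must establish mappings explicitly, e.g. with pmap_qenter(), before
 * touching the range.  The function name and page array are hypothetical.
 */
#if 0	/* example only */
static void
example_nofault(vm_page_t *pages, int npages)
{
	vm_offset_t va;

	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
	if (va == 0)
		return;
	pmap_qenter(va, pages, npages);	/* create the mapping by hand */
	/* ... use the mapping; a fault in this range would panic ... */
	pmap_qremove(va, npages);	/* tear the mapping down */
	kmem_free(kernel_map, va, npages * PAGE_SIZE);
}
#endif
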
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire().  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are allocating a new page
	 *    for it with kmem_alloc().
	 * 3) vm_map_wire() calls vm_fault(); there is no page, but there
	 *    is a pager, so we call pager_data_request().  But the kmsg
	 *    zone is empty, so we must kmem_alloc().
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc() is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_wire() will wire the
	 * pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		VM_OBJECT_LOCK(kernel_object);
		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		VM_OBJECT_UNLOCK(kernel_object);
		if ((mem->flags & PG_ZERO) == 0)
			pmap_zero_page(mem);
		vm_page_lock_queues();
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
		vm_page_unlock_queues();
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}

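/*
 * Illustrative sketch (editor's addition, not in the original file):
 * kmem_alloc() returns wired, zero-filled memory, so the result can be
 * used immediately.  The function name and size are hypothetical.
 */
#if 0	/* example only */
static caddr_t
example_wired_buffer(vm_size_t len)
{
	vm_offset_t va;

	va = kmem_alloc(kernel_map, len);	/* wired and zero-filled */
	if (va == 0)
		return (NULL);
	return ((caddr_t)va);
}
#endif
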
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

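/*
 * Illustrative sketch (editor's addition, not in the original file):
 * releasing the hypothetical buffer from the example above; the length
 * must cover the same (page-rounded) range that was allocated.
 */
#if 0	/* example only */
static void
example_wired_buffer_free(caddr_t buf, vm_size_t len)
{

	kmem_free(kernel_map, (vm_offset_t)buf, len);
}
#endif
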
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

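/*
 * Illustrative sketch (editor's addition, not in the original file): this
 * is the general shape of the boot-time calls that create the submaps
 * declared at the top of this file; the size shown is hypothetical, and
 * kmem_suballoc() panics rather than returning on failure.
 */
#if 0	/* example only */
static void
example_create_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE);
}
#endif
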
/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				(long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		vm_page_lock_queues();
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_unmanage(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		VM_OBJECT_LOCK(kmem_object);
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		VM_OBJECT_UNLOCK(kmem_object);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_lock_queues();
		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	vm_map_unlock(map);

	return (addr);
}

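/*
 * Illustrative sketch (editor's addition, not in the original file):
 * kmem_malloc() is normally reached through malloc(9) rather than called
 * directly; a direct call might look like this.  The function name, size,
 * and flags are hypothetical.
 */
#if 0	/* example only */
static vm_offset_t
example_kmem_malloc(void)
{
	vm_offset_t va;

	/* May be called where blocking is forbidden, hence M_NOWAIT. */
	va = kmem_malloc(kmem_map, PAGE_SIZE, M_NOWAIT | M_ZERO);
	return (va);	/* 0 on failure; wired and zeroed on success */
}
#endif
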
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, FALSE);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

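/*
 * Illustrative sketch (editor's addition, not in the original file): the
 * alloc-wait/free-wakeup pair suits transient allocations from a
 * fixed-size submap, such as argument buffers carved from exec_map; the
 * function name and size here are hypothetical.
 */
#if 0	/* example only */
static vm_offset_t
example_transient(void)
{
	vm_offset_t va;

	/* Sleeps until space appears, unless the request can never fit. */
	va = kmem_alloc_wait(exec_map, 4 * PAGE_SIZE);
	return (va);
}
#endif
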
/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}

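/*
 * Illustrative sketch (editor's addition, not in the original file):
 * returning the transient allocation from the previous example, waking
 * any thread sleeping in kmem_alloc_wait() on the same submap.
 */
#if 0	/* example only */
static void
example_transient_free(vm_offset_t va)
{

	kmem_free_wakeup(exec_map, va, 4 * PAGE_SIZE);
}
#endif
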
/*
 * 	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
538