vm_kern.c revision 178637
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 178637 2008-04-28 17:25:27Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t pipe_map;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
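
/*
 * Usage sketch (compiled out, illustrative only): a consumer of
 * kmem_alloc_nofault() reserves KVA and then backs it explicitly with
 * pmap_qenter(), as described above.  The helper name and the origin of
 * the page array are hypothetical.
 */
#if 0
static vm_offset_t
example_map_pages(vm_page_t *pages, int npages)
{
	vm_offset_t kva;

	kva = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
	if (kva == 0)
		return (0);
	/* Mappings must be created explicitly; vm_fault() on this range panics. */
	pmap_qenter(kva, pages, npages);
	return (kva);
}
#endif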

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object. 2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it. 3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_wire will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		mem->valid = VM_PAGE_BITS_ALL;
		KASSERT((mem->flags & PG_UNMANAGED) != 0,
		    ("kmem_alloc: page %p is managed", mem));
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM|VM_MAP_WIRE_NOHOLES);

	return (addr);
}
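
/*
 * Usage sketch (compiled out, illustrative only): a caller that wants
 * wired, zero-filled kernel memory simply passes a size to kmem_alloc()
 * and checks for failure.  The helper name is hypothetical.
 */
#if 0
static vm_offset_t
example_wired_alloc(vm_size_t bytes)
{
	vm_offset_t va;

	va = kmem_alloc(kernel_map, bytes);	/* returns 0 on failure */
	if (va == 0)
		printf("example_wired_alloc: out of kernel virtual memory\n");
	return (va);
}
#endif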

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
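
/*
 * Usage sketch (compiled out, illustrative only): the matching release for
 * the kmem_alloc() sketch above.  The same map and the same (address, size)
 * pair must be handed back; the helper name is hypothetical.
 */
#if 0
static void
example_wired_free(vm_offset_t va, vm_size_t bytes)
{

	kmem_free(kernel_map, va, bytes);
}
#endif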

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
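
/*
 * Usage sketch (compiled out, illustrative only): submaps such as exec_map
 * and pipe_map are carved out of kernel_map at startup with calls of roughly
 * this shape.  The helper name and the size shown are hypothetical
 * placeholders, not the values the kernel actually uses.
 */
#if 0
static void
example_create_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	/* kmem_suballoc() panics on failure, so no error check is needed. */
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * 1024 * 1024);
}
#endif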

/*
 *	kmem_malloc:
 *
 * 	Allocate wired-down memory in the kernel's address map for the higher
 * 	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * 	kmem_alloc() because we may need to allocate memory at interrupt
 * 	level where we cannot block (canwait == FALSE).
 *
 * 	This routine has its own private kernel submap (kmem_map) and object
 * 	(kmem_object).  This, combined with the fact that only malloc uses
 * 	this routine, ensures that we will never block in map or object waits.
 *
 * 	Note that this still only works in a uni-processor environment and
 * 	when called at splhigh().
 *
 * 	We don't worry about expanding the map (adding entries) since entries
 * 	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps to
 *	which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if ((flags & M_NOWAIT) == 0) {
			for (i = 0; i < 8; i++) {
				EVENTHANDLER_INVOKE(vm_lowmem, 0);
				uma_reclaim();
				vm_map_lock(map);
				if (vm_map_findspace(map, vm_map_min(map),
				    size, &addr) == 0) {
					break;
				}
				vm_map_unlock(map);
				tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
			}
			if (i == 8) {
				panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				    (long)size, (long)map->size);
			}
		} else {
			return (0);
		}
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */

	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		KASSERT((m->flags & PG_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap.
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
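
/*
 * Usage sketch (compiled out, illustrative only): the malloc(9) layer in
 * kern/kern_malloc.c hands large requests to this routine, always against
 * kmem_map, propagating the caller's M_NOWAIT/M_ZERO flags.  The helper
 * name below is hypothetical.
 */
#if 0
static void *
example_large_malloc(vm_size_t bytes, int flags)
{
	vm_offset_t va;

	va = kmem_malloc(kmem_map, round_page(bytes), flags);
	return ((void *)va);	/* NULL (0) if the map or free pages ran out */
}
#endif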

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}
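
/*
 * Usage sketch (compiled out, illustrative only): exec argument buffers are
 * a typical user of this interface, allocating pageable space from exec_map
 * and sleeping here until a previous allocation is returned.  The helper
 * name is hypothetical.
 */
#if 0
static vm_offset_t
example_argbuf_alloc(vm_size_t bytes)
{

	/* May sleep; returns 0 only if the submap can never satisfy bytes. */
	return (kmem_alloc_wait(exec_map, bytes));
}
#endif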

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
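
/*
 * Usage sketch (compiled out, illustrative only): the counterpart to the
 * kmem_alloc_wait() sketch above.  Returning the range with
 * kmem_free_wakeup() is what unblocks threads sleeping in kmem_alloc_wait()
 * on the same submap.  The helper name is hypothetical.
 */
#if 0
static void
example_argbuf_free(vm_offset_t va, vm_size_t bytes)
{

	kmem_free_wakeup(exec_map, va, bytes);
}
#endif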

/*
 * 	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
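
/*
 * Usage sketch (compiled out, illustrative only): early VM startup calls
 * kmem_init() once, passing the first and last kernel virtual addresses
 * still available after the boot-time allocations; vm_mem_init() in
 * vm/vm_init.c does this with virtual_avail and virtual_end.  The helper
 * and parameter names below are hypothetical.
 */
#if 0
static void
example_vm_startup(vm_offset_t firstaddr, vm_offset_t lastaddr)
{

	/* Everything below firstaddr is treated as already allocated. */
	kmem_init(firstaddr, lastaddr);
}
#endif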