vm_kern.c revision 98455
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_kern.c 98455 2002-06-19 23:49:57Z jeff $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
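/*
 * Illustrative sketch of a caller (not part of this file): reserving
 * pageable kernel virtual address space and checking the zero-on-failure
 * convention.  The helper name example_reserve_kva and the 16-page size
 * are hypothetical assumptions for this example only.
 */
#if 0
static vm_offset_t
example_reserve_kva(void)
{
	vm_offset_t addr;

	/* Size is rounded to a page boundary; 0 signals failure. */
	addr = kmem_alloc_pageable(kernel_map, 16 * PAGE_SIZE);
	if (addr == 0)
		printf("example: no kernel address space available\n");
	return (addr);
}
#endif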
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	GIANT_REQUIRED;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
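/*
 * Sketch of a hypothetical kmem_alloc()/kmem_free() pairing; the helper
 * name and the 4-page size are assumptions for illustration only.
 * kmem_alloc() returns zero-filled, wired memory, so no explicit bzero()
 * is needed before use.
 */
#if 0
static void
example_wired_scratch(void)
{
	vm_size_t size = 4 * PAGE_SIZE;
	vm_offset_t buf;

	buf = kmem_alloc(kernel_map, size);
	if (buf == 0)
		return;			/* no space in the map */
	/* ... use the wired, zeroed buffer ... */
	kmem_free(kernel_map, buf, size);
}
#endif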
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	GIANT_REQUIRED;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
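/*
 * Sketch of submap creation, modeled loosely on how maps such as exec_map
 * and buffer_map are carved out of kernel_map at startup.  The variable
 * example_map and the 32-page size are assumptions, not code from this
 * file; note that kmem_suballoc() panics rather than returning on failure.
 */
#if 0
static vm_map_t example_map;

static void
example_create_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	example_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    32 * PAGE_SIZE);
}
#endif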
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 *	from which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	GIANT_REQUIRED;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map != kmem_map) {
			static int last_report; /* when we did it (in ticks) */
			if (ticks < last_report ||
			    (ticks - last_report) >= hz) {
				last_report = ticks;
				printf("Out of mbuf address space!\n");
				printf("Consider increasing NMBCLUSTERS\n");
			}
			goto bad;
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		goto bad;
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Note: if M_NOWAIT is specified alone, allocate from
	 * interrupt-safe queues only (just the free list).  If
	 * M_USE_RESERVE is also specified, we can also
	 * allocate from the cache.  Neither of the latter two
	 * flags may be specified from an interrupt since interrupts
	 * are not allowed to mess with the cache queue.
	 */
	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT;
	else
		pflags = VM_ALLOC_SYSTEM;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			goto bad;
		}
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			vm_page_zero_fill(m);
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry so there will be a new
	 * entry exactly corresponding to this address range and it will
	 * have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);

bad:
	return (0);
}
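/*
 * Hypothetical non-blocking allocation through kmem_malloc(), roughly the
 * shape of what the kernel malloc layer does; example_atomic_alloc() is an
 * assumption for illustration.  M_NOWAIT alone maps to VM_ALLOC_INTERRUPT
 * above, so pages come from the free list only, and M_ZERO requests
 * zero-filled pages.
 */
#if 0
static void *
example_atomic_alloc(vm_size_t size)
{
	vm_offset_t va;

	va = kmem_malloc(kmem_map, size, M_NOWAIT | M_ZERO);
	return ((void *)va);		/* NULL on failure */
}
#endif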
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	GIANT_REQUIRED;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	GIANT_REQUIRED;

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
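/*
 * Sketch of the sleep/wakeup pairing on a submap; example_submap_cycle()
 * is hypothetical.  kmem_alloc_wait() may sleep ("kmaw") until another
 * thread calls kmem_free_wakeup() on the same map, and returns 0 only when
 * the request can never be satisfied.
 */
#if 0
static void
example_submap_cycle(void)
{
	vm_offset_t addr;

	addr = kmem_alloc_wait(exec_map, PAGE_SIZE);
	if (addr == 0)
		return;			/* larger than the whole submap */
	/* ... use the pageable range ... */
	kmem_free_wakeup(exec_map, addr, PAGE_SIZE);
}
#endif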
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
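/*
 * Sketch of the bootstrap ordering, assuming the conventional startup
 * sequence: kmem_init() first creates kernel_map, after which private
 * submaps such as kmem_map are split off with kmem_suballoc().  The
 * function example_vm_bootstrap() and the 64-page size are hypothetical.
 */
#if 0
static void
example_vm_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr)
{
	vm_offset_t minaddr, maxaddr;

	/* [VM_MIN_KERNEL_ADDRESS, firstaddr) becomes allocated space. */
	kmem_init(firstaddr, lastaddr);
	/* Later, submaps are carved out of the fresh kernel_map. */
	kmem_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    64 * PAGE_SIZE);
}
#endif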