vm_kern.c: deleted version (2112) vs. added version (5455)
1/*
2 * Copyright (c) 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions

--- 25 unchanged lines hidden ---

35 *
36 * from: @(#)vm_kern.c 8.3 (Berkeley) 1/12/94
37 *
38 *
39 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
40 * All rights reserved.
41 *
42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
43 *
44 * Permission to use, copy, modify and distribute this software and
45 * its documentation is hereby granted, provided that both the copyright
46 * notice and this permission notice appear in all copies of the
47 * software, derivative works or modified versions, and any portions
48 * thereof, and that both notices appear in supporting documentation.
49 *
50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
53 *
54 * Carnegie Mellon requests users of this software to return to
55 *
56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
57 * School of Computer Science
58 * Carnegie Mellon University
59 * Pittsburgh PA 15213-3890
60 *
61 * any improvements or extensions that they make and grant Carnegie the
62 * rights to redistribute these changes.
63 *
64 * $Id: vm_kern.c,v 1.6 1994/08/07 14:53:26 davidg Exp $
64 * $Id: vm_kern.c,v 1.7 1994/08/18 22:36:02 wollman Exp $
65 */
66
67/*
68 * Kernel memory management.
69 */
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/kernel.h>
74#include <sys/proc.h>
75
76#include <vm/vm.h>
77#include <vm/vm_page.h>
78#include <vm/vm_pageout.h>
79#include <vm/vm_kern.h>
80
81vm_map_t buffer_map;
82vm_map_t kernel_map;
83vm_map_t kmem_map;
84vm_map_t mb_map;
85vm_map_t io_map;
86vm_map_t clean_map;
87vm_map_t pager_map;
88vm_map_t phys_map;
89vm_map_t exec_map;
90vm_map_t u_map;
89
90/*
91 * kmem_alloc_pageable:
92 *
93 * Allocate pageable memory to the kernel's address map.
94 * map must be "kernel_map" below.
95 */
96
97vm_offset_t kmem_alloc_pageable(map, size)
98 vm_map_t map;
99 register vm_size_t size;
99vm_offset_t
100kmem_alloc_pageable(map, size)
101 vm_map_t map;
102 register vm_size_t size;
100{
101 vm_offset_t addr;
102 register int result;
103
104#if 0
105 if (map != kernel_map)
106 panic("kmem_alloc_pageable: not called with kernel_map");
107#endif
108
109 size = round_page(size);
110
111 addr = vm_map_min(map);
112 result = vm_map_find(map, NULL, (vm_offset_t) 0,
113 &addr, size, TRUE);
114 if (result != KERN_SUCCESS) {
115 return(0);
118 return (0);
116 }
117
118 return(addr);
120 return (addr);
119}
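For context, here is a minimal sketch of how a kernel subsystem of this era might call the routine above. It is illustrative only, not part of the diff; the example_ function name and the four-page request size are invented.

static void
example_pageable_alloc(void)
{
	vm_offset_t va;
	vm_size_t len = 4 * PAGE_SIZE;	/* arbitrary example size */

	/*
	 * Reserve pageable kernel virtual address space; the backing
	 * pages are faulted in on first reference rather than wired here.
	 */
	va = kmem_alloc_pageable(kernel_map, len);
	if (va == 0)
		panic("example: kernel_map exhausted");
}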
120
121/*
122 * Allocate wired-down memory in the kernel's address map
123 * or a submap.
124 */
125vm_offset_t kmem_alloc(map, size)
126 register vm_map_t map;
127 register vm_size_t size;
127vm_offset_t
128kmem_alloc(map, size)
129 register vm_map_t map;
130 register vm_size_t size;
128{
129 vm_offset_t addr;
130 register vm_offset_t offset;
131 vm_offset_t i;
132
133 size = round_page(size);
134
135 /*
136 * Use the kernel object for wired-down kernel pages.
137 * Assume that no region of the kernel object is
138 * referenced more than once.
139 * Use the kernel object for wired-down kernel pages. Assume that no
140 * region of the kernel object is referenced more than once.
139 */
140
141 /*
142 * Locate sufficient space in the map. This will give us the
143 * final virtual address for the new memory, and thus will tell
144 * us the offset within the kernel map.
144 * Locate sufficient space in the map. This will give us the final
145 * virtual address for the new memory, and thus will tell us the
146 * offset within the kernel map.
145 */
146 vm_map_lock(map);
147 if (vm_map_findspace(map, 0, size, &addr)) {
148 vm_map_unlock(map);
149 return (0);
150 }
151 offset = addr - VM_MIN_KERNEL_ADDRESS;
152 vm_object_reference(kernel_object);
153 vm_map_insert(map, kernel_object, offset, addr, addr + size);
154 vm_map_unlock(map);
155
156 /*
157 * Guarantee that there are pages already in this object
158 * before calling vm_map_pageable. This is to prevent the
159 * following scenario:
160 *
161 * 1) Threads have swapped out, so that there is a
162 * pager for the kernel_object.
163 * 2) The kmsg zone is empty, and so we are kmem_allocing
164 * a new page for it.
165 * 3) vm_map_pageable calls vm_fault; there is no page,
166 * but there is a pager, so we call
167 * pager_data_request. But the kmsg zone is empty,
168 * so we must kmem_alloc.
169 * 4) goto 1
170 * 5) Even if the kmsg zone is not empty: when we get
171 * the data back from the pager, it will be (very
172 * stale) non-zero data. kmem_alloc is defined to
173 * return zero-filled memory.
174 *
175 * We're intentionally not activating the pages we allocate
176 * to prevent a race with page-out. vm_map_pageable will wire
177 * the pages.
159 * Guarantee that there are pages already in this object before
160 * calling vm_map_pageable. This is to prevent the following
161 * scenario:
162 *
163 * 1) Threads have swapped out, so that there is a pager for the
164 * kernel_object. 2) The kmsg zone is empty, and so we are
165 * kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
166 * there is no page, but there is a pager, so we call
167 * pager_data_request. But the kmsg zone is empty, so we must
168 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
169 * we get the data back from the pager, it will be (very stale)
170 * non-zero data. kmem_alloc is defined to return zero-filled memory.
171 *
172 * We're intentionally not activating the pages we allocate to prevent a
173 * race with page-out. vm_map_pageable will wire the pages.
178 */
179
180 vm_object_lock(kernel_object);
181 for (i = 0 ; i < size; i+= PAGE_SIZE) {
182 vm_page_t mem;
177 for (i = 0; i < size; i += PAGE_SIZE) {
178 vm_page_t mem;
183
184 while ((mem = vm_page_alloc(kernel_object, offset+i)) == NULL) {
180 while ((mem = vm_page_alloc(kernel_object, offset + i, 0)) == NULL) {
185 vm_object_unlock(kernel_object);
186 VM_WAIT;
187 vm_object_lock(kernel_object);
188 }
189 vm_page_zero_fill(mem);
190 mem->flags &= ~PG_BUSY;
181 vm_object_unlock(kernel_object);
182 VM_WAIT;
183 vm_object_lock(kernel_object);
184 }
185 vm_page_zero_fill(mem);
186 mem->flags &= ~PG_BUSY;
187 mem->valid |= VM_PAGE_BITS_ALL;
191 }
192 vm_object_unlock(kernel_object);
193
194 /*
195 * And finally, mark the data as non-pageable.
196 */
197
198 (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);
199
200 /*
201 * Try to coalesce the map
202 */
203
204 vm_map_simplify(map, addr);
205
200 vm_map_simplify(map, addr);
201
206 return(addr);
202 return (addr);
207}
208
209/*
210 * kmem_free:
211 *
212 * Release a region of kernel virtual memory allocated
213 * with kmem_alloc, and return the physical pages
214 * associated with that region.
215 */
216void kmem_free(map, addr, size)
217 vm_map_t map;
218 register vm_offset_t addr;
219 vm_size_t size;
212void
213kmem_free(map, addr, size)
214 vm_map_t map;
215 register vm_offset_t addr;
216 vm_size_t size;
220{
221 (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
222}
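The wired-allocation lifecycle that kmem_alloc() and kmem_free() implement looks roughly like this from a caller's point of view. This is an illustrative sketch, not code from the file; the example_ function name and the 16-page size are made up.

static void
example_wired_alloc(void)
{
	vm_offset_t buf;
	vm_size_t len = 16 * PAGE_SIZE;	/* made-up example size */

	buf = kmem_alloc(kernel_map, len);	/* zero-filled and wired */
	if (buf == 0)
		return;				/* no kernel VA left */
	/* ... use the memory ... */
	kmem_free(kernel_map, buf, len);	/* unmap and free the pages */
}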
223
224/*
225 * kmem_suballoc:
226 *
227 * Allocates a map to manage a subrange
228 * of the kernel virtual address space.
229 *
230 * Arguments are as follows:
231 *
232 * parent Map to take range from
233 * size Size of range to find
234 * min, max Returned endpoints of map
235 * pageable Can the region be paged
236 */
237vm_map_t kmem_suballoc(parent, min, max, size, pageable)
238 register vm_map_t parent;
239 vm_offset_t *min, *max;
240 register vm_size_t size;
241 boolean_t pageable;
234vm_map_t
235kmem_suballoc(parent, min, max, size, pageable)
236 register vm_map_t parent;
237 vm_offset_t *min, *max;
238 register vm_size_t size;
239 boolean_t pageable;
242{
243 register int ret;
244 vm_map_t result;
245
246 size = round_page(size);
247
248 *min = (vm_offset_t) vm_map_min(parent);
249 ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
250 min, size, TRUE);
251 if (ret != KERN_SUCCESS) {
252 printf("kmem_suballoc: bad status return of %d.\n", ret);
253 panic("kmem_suballoc");
254 }
255 *max = *min + size;
256 pmap_reference(vm_map_pmap(parent));
257 result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
258 if (result == NULL)
259 panic("kmem_suballoc: cannot create submap");
260 if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
261 panic("kmem_suballoc: unable to change range to submap");
262 return(result);
260 return (result);
263}
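This is how the submaps declared near the top of the file (buffer_map, mb_map, exec_map, and so on) are typically carved out of kernel_map at startup. The sketch below is illustrative only; example_map, the example_ function name, and the 256-page size are hypothetical.

static vm_map_t example_map;

static void
example_submap_init(void)
{
	vm_offset_t minaddr, maxaddr;

	/*
	 * Reserve a 256-page range of kernel VA and wrap it in its own
	 * map; later allocations for this subsystem come from that range.
	 */
	example_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    256 * PAGE_SIZE, TRUE);
}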
264
265/*
266 * Allocate wired-down memory in the kernel's address map for the higher
267 * level kernel memory allocator (kern/kern_malloc.c). We cannot use
268 * kmem_alloc() because we may need to allocate memory at interrupt
269 * level where we cannot block (canwait == FALSE).
270 *

--- 4 unchanged lines hidden ---

275 * Note that this still only works in a uni-processor environment and
276 * when called at splhigh().
277 *
278 * We don't worry about expanding the map (adding entries) since entries
279 * for wired maps are statically allocated.
280 */
281vm_offset_t
282kmem_malloc(map, size, canwait)
283 register vm_map_t map;
284 register vm_size_t size;
285 boolean_t canwait;
286{
287 register vm_offset_t offset, i;
288 vm_map_entry_t entry;
289 vm_offset_t addr;
290 vm_page_t m;
291
292 if (map != kmem_map && map != mb_map)
293 panic("kern_malloc_alloc: map != {kmem,mb}_map");
294
295 size = round_page(size);
296 addr = vm_map_min(map);
297
298 /*
299 * Locate sufficient space in the map. This will give us the
300 * final virtual address for the new memory, and thus will tell
301 * us the offset within the kernel map.
297 * Locate sufficient space in the map. This will give us the final
298 * virtual address for the new memory, and thus will tell us the
299 * offset within the kernel map.
302 */
303 vm_map_lock(map);
304 if (vm_map_findspace(map, 0, size, &addr)) {
305 vm_map_unlock(map);
306#if 0
307 if (canwait) /* XXX should wait */
308 panic("kmem_malloc: %s too small",
309 map == kmem_map ? "kmem_map" : "mb_map");
310#endif
311 if (canwait)
312 panic("kmem_malloc: map too small");
313 return (0);
314 }
315 offset = addr - vm_map_min(kmem_map);
316 vm_object_reference(kmem_object);
317 vm_map_insert(map, kmem_object, offset, addr, addr + size);
318
319 /*
320 * If we can wait, just mark the range as wired
321 * (will fault pages as necessary).
318 * If we can wait, just mark the range as wired (will fault pages as
319 * necessary).
322 */
323 if (canwait) {
324 vm_map_unlock(map);
325 (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
326 FALSE);
327 vm_map_simplify(map, addr);
328 return(addr);
326 return (addr);
329 }
330
331 /*
332 * If we cannot wait then we must allocate all memory up front,
333 * pulling it off the active queue to prevent pageout.
334 */
335 vm_object_lock(kmem_object);
336 for (i = 0; i < size; i += PAGE_SIZE) {
328 /*
329 * If we cannot wait then we must allocate all memory up front,
330 * pulling it off the active queue to prevent pageout.
331 */
332 vm_object_lock(kmem_object);
333 for (i = 0; i < size; i += PAGE_SIZE) {
337 m = vm_page_alloc(kmem_object, offset + i);
334 m = vm_page_alloc(kmem_object, offset + i, 1);
338
339 /*
340 * Ran out of space, free everything up and return.
341 * Don't need to lock page queues here as we know
342 * that the pages we got aren't on any queues.
337 * Ran out of space, free everything up and return. Don't need
338 * to lock page queues here as we know that the pages we got
339 * aren't on any queues.
343 */
344 if (m == NULL) {
345 while (i != 0) {
346 i -= PAGE_SIZE;
347 m = vm_page_lookup(kmem_object, offset + i);
348 vm_page_free(m);
349 }
350 vm_object_unlock(kmem_object);
351 vm_map_delete(map, addr, addr + size);
352 vm_map_unlock(map);
353 return(0);
350 return (0);
354 }
355#if 0
356 vm_page_zero_fill(m);
357#endif
358 m->flags &= ~PG_BUSY;
351 }
352#if 0
353 vm_page_zero_fill(m);
354#endif
355 m->flags &= ~PG_BUSY;
356 m->valid |= VM_PAGE_BITS_ALL;
359 }
360 vm_object_unlock(kmem_object);
361
362 /*
363 * Mark map entry as non-pageable.
364 * Assert: vm_map_insert() will never be able to extend the previous
365 * entry so there will be a new entry exactly corresponding to this
366 * address range and it will have wired_count == 0.
361 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
362 * be able to extend the previous entry so there will be a new entry
363 * exactly corresponding to this address range and it will have
364 * wired_count == 0.
367 */
368 if (!vm_map_lookup_entry(map, addr, &entry) ||
369 entry->start != addr || entry->end != addr + size ||
370 entry->wired_count)
371 panic("kmem_malloc: entry not found or misaligned");
372 entry->wired_count++;
373
374 /*
375 * Loop thru pages, entering them in the pmap.
376 * (We cannot add them to the wired count without
377 * wrapping the vm_page_queue_lock in splimp...)
373 * Loop thru pages, entering them in the pmap. (We cannot add them to
374 * the wired count without wrapping the vm_page_queue_lock in
375 * splimp...)
378 */
379 for (i = 0; i < size; i += PAGE_SIZE) {
380 vm_object_lock(kmem_object);
381 m = vm_page_lookup(kmem_object, offset + i);
382 vm_object_unlock(kmem_object);
383 pmap_kenter( addr + i, VM_PAGE_TO_PHYS(m));
381 pmap_kenter(addr + i, VM_PAGE_TO_PHYS(m));
384 }
385 vm_map_unlock(map);
386
387 vm_map_simplify(map, addr);
388 return(addr);
386 return (addr);
389}
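To make the canwait distinction concrete, a hypothetical consumer (the real one is malloc() in kern/kern_malloc.c) might use the routine above as follows. This is a sketch under that assumption, not code from the file; the example_ function name is invented.

static vm_offset_t
example_kmem_malloc(int at_interrupt_level)
{
	vm_offset_t va;

	/*
	 * At interrupt level we must not block, so pass canwait == FALSE
	 * and accept a 0 return when the map or free pages run out.
	 * Otherwise the range is simply marked wired and faulted later.
	 */
	va = kmem_malloc(kmem_map, PAGE_SIZE,
	    at_interrupt_level ? FALSE : TRUE);
	return (va);
}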
390
391/*
392 * kmem_alloc_wait
393 *
394 * Allocates pageable memory from a sub-map of the kernel. If the submap
395 * has no room, the caller sleeps waiting for more memory in the submap.
396 *
397 */
398vm_offset_t kmem_alloc_wait(map, size)
399 vm_map_t map;
400 vm_size_t size;
396vm_offset_t
397kmem_alloc_wait(map, size)
398 vm_map_t map;
399 vm_size_t size;
401{
402 vm_offset_t addr;
403
404 size = round_page(size);
405
406 for (;;) {
407 /*
408 * To make this work for more than one map,
409 * use the map's lock to lock out sleepers/wakers.
407 * To make this work for more than one map, use the map's lock
408 * to lock out sleepers/wakers.
410 */
411 vm_map_lock(map);
412 if (vm_map_findspace(map, 0, size, &addr) == 0)
413 break;
414 /* no space now; see if we can ever get space */
415 if (vm_map_max(map) - vm_map_min(map) < size) {
416 vm_map_unlock(map);
417 return (0);
418 }
419 assert_wait((int)map, TRUE);
418 assert_wait((int) map, TRUE);
420 vm_map_unlock(map);
421 thread_block("kmaw");
422 }
423 vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
422 vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size);
424 vm_map_unlock(map);
425 return (addr);
426}
427
428/*
429 * kmem_free_wakeup
430 *
431 * Returns memory to a submap of the kernel, and wakes up any threads
432 * waiting for memory in that map.
433 */
434void kmem_free_wakeup(map, addr, size)
435 vm_map_t map;
436 vm_offset_t addr;
437 vm_size_t size;
433void
434kmem_free_wakeup(map, addr, size)
435 vm_map_t map;
436 vm_offset_t addr;
437 vm_size_t size;
438{
439 vm_map_lock(map);
440 (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
441 thread_wakeup((int)map);
441 thread_wakeup((int) map);
442 vm_map_unlock(map);
443}
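kmem_alloc_wait() and kmem_free_wakeup() are meant to be used as a pair on a submap such as pager_map: the allocator sleeps until space is freed, and the free path wakes the sleepers. A rough sketch follows; the example_ function name and the eight-page transfer size are invented.

static void
example_pager_transfer(void)
{
	vm_size_t len = 8 * PAGE_SIZE;	/* invented transfer size */
	vm_offset_t kva;

	/*
	 * Sleeps until pager_map has room; returns 0 only if the
	 * request could never fit in the map at all.
	 */
	kva = kmem_alloc_wait(pager_map, len);
	if (kva == 0)
		return;
	/* ... map the pages at kva and do the transfer ... */
	kmem_free_wakeup(pager_map, kva, len);	/* wake any waiters */
}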
444
445/*
446 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 447 * and all space allocated thus far (`bootstrap' data). The new map will thus
448 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
449 * the range between `start' and `end' as free.
450 */
451void kmem_init(start, end)
451void
452kmem_init(start, end)
452 vm_offset_t start, end;
453{
454 register vm_map_t m;
455
456 m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
457 vm_map_lock(m);
458 /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
459 kernel_map = m;
460 (void) vm_map_insert(m, NULL, (vm_offset_t)0,
461 (void) vm_map_insert(m, NULL, (vm_offset_t) 0,
461 VM_MIN_KERNEL_ADDRESS, start);
462 /* ... and ending with the completion of the above `insert' */
463 vm_map_unlock(m);
464}
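kmem_init() runs once, early in VM startup, with the first still-unallocated kernel virtual address and the end of the kernel address space. The caller below is hypothetical; the function and argument names are assumptions, not identifiers from this file.

static void
example_vm_bootstrap(vm_offset_t first_avail, vm_offset_t kva_end)
{
	/*
	 * Everything below first_avail is recorded as already allocated
	 * (kernel text, data, bss, bootstrap tables); the range from
	 * first_avail to kva_end becomes the free part of kernel_map.
	 */
	kmem_init(round_page(first_avail), trunc_page(kva_end));
}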