/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 118764 2003-08-11 05:51:51Z silby $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map = 0;
vm_map_t kmem_map = 0;
vm_map_t exec_map = 0;
vm_map_t pipe_map = 0;
vm_map_t clean_map = 0;
vm_map_t buffer_map = 0;

/*
 * kmem_alloc_pageable:
 *
 * Allocate pageable memory to the kernel's address map.
 * "map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0,
            &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
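
/*
 * Usage sketch (illustrative only; the size and the error handling are
 * hypothetical): reserve a pageable region, then release it with
 * kmem_free() once it is no longer needed.
 *
 *    vm_offset_t va;
 *
 *    va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *    if (va == 0)
 *        return (ENOMEM);
 *    ...
 *    kmem_free(kernel_map, va, 4 * PAGE_SIZE);
 */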

/*
 * kmem_alloc_nofault:
 *
 * Allocate a virtual address range with no underlying object and
 * no initial mapping to physical memory.  Any mapping from this
 * range to physical memory must be explicitly created prior to
 * its use, typically with pmap_qenter().  Any attempt to create
 * a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, 0,
            &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
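
/*
 * Usage sketch (illustrative only; "pages" and "npages" are hypothetical
 * names): map caller-supplied physical pages into a nofault range with
 * pmap_qenter(), and undo the mapping with pmap_qremove() before freeing
 * the range.
 *
 *    vm_offset_t va;
 *
 *    va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *    if (va == 0)
 *        return (ENOMEM);
 *    pmap_qenter(va, pages, npages);
 *    ...
 *    pmap_qremove(va, npages);
 *    kmem_free(kernel_map, va, npages * PAGE_SIZE);
 */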

/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        vm_offset_t offset;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages.  Assume that no
         * region of the kernel object is referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_wire.  This is to prevent the following
         * scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         *    kernel_object.
         * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new
         *    page for it.
         * 3) vm_map_wire calls vm_fault; there is no page, but there
         *    is a pager, so we call pager_data_request.  But the kmsg
         *    zone is empty, so we must kmem_alloc.
         * 4) goto 1
         * 5) Even if the kmsg zone is not empty: when we get the data
         *    back from the pager, it will be (very stale) non-zero data.
         *    kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to
         * prevent a race with page-out.  vm_map_wire will wire the pages.
         */
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                VM_OBJECT_LOCK(kernel_object);
                mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
                    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                VM_OBJECT_UNLOCK(kernel_object);
                if ((mem->flags & PG_ZERO) == 0)
                        pmap_zero_page(mem);
                vm_page_lock_queues();
                mem->valid = VM_PAGE_BITS_ALL;
                vm_page_flag_clear(mem, PG_ZERO);
                vm_page_wakeup(mem);
                vm_page_unlock_queues();
        }

        /*
         * And finally, mark the data as non-pageable.
         */
        (void) vm_map_wire(map, addr, addr + size, FALSE);

        return (addr);
}
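
/*
 * Usage sketch (illustrative only; the size is an arbitrary placeholder):
 * a caller that may block obtains a zero-filled, wired buffer; a return
 * value of 0 signals kernel address-space exhaustion.
 *
 *    vm_offset_t va;
 *
 *    va = kmem_alloc(kernel_map, 2 * PAGE_SIZE);
 *    if (va == 0)
 *        return (ENOMEM);
 *    ...
 *    kmem_free(kernel_map, va, 2 * PAGE_SIZE);
 */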

/*
 * kmem_free:
 *
 * Release a region of kernel virtual memory allocated
 * with kmem_alloc, and return the physical pages
 * associated with that region.
 *
 * This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 * Allocates a map to manage a subrange
 * of the kernel virtual address space.
 *
 * Arguments are as follows:
 *
 * parent	Map to take range from
 * min, max	Returned endpoints of map
 * size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
        vm_map_t parent;
        vm_offset_t *min, *max;
        vm_size_t size;
{
        int ret;
        vm_map_t result;

        GIANT_REQUIRED;

        size = round_page(size);

        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
            min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS) {
                printf("kmem_suballoc: bad status return of %d.\n", ret);
                panic("kmem_suballoc");
        }
        *max = *min + size;
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
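
/*
 * Usage sketch (illustrative only; the size is a placeholder): this is
 * how the global submaps declared at the top of this file are typically
 * carved out of kernel_map during startup, while Giant is held.
 *
 *    vm_offset_t minaddr, maxaddr;
 *
 *    exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *        16 * PAGE_SIZE);
 */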

/*
 * kmem_malloc:
 *
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (M_NOWAIT is set).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uniprocessor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 *
 * NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 * I have not verified that it actually does not block.
 *
 * `map' is ONLY allowed to be kmem_map or one of the mbuf submaps,
 * from which we never free.
 */
vm_offset_t
kmem_malloc(map, size, flags)
        vm_map_t map;
        vm_size_t size;
        int flags;
{
        vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_offset_t addr;
        vm_page_t m;
        int pflags;

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                if (map != kmem_map) {
                        static int last_report; /* when we did it (in ticks) */
                        if (ticks < last_report ||
                            (ticks - last_report) >= hz) {
                                last_report = ticks;
                                printf("Out of mbuf address space!\n");
                                printf("Consider increasing NMBCLUSTERS\n");
                        }
                        return (0);
                }
                if ((flags & M_NOWAIT) == 0)
                        panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
                            (long)size, (long)map->size);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        /*
         * Note: if M_NOWAIT is specified alone, allocate from
         * interrupt-safe queues only (just the free list).  If
         * M_USE_RESERVE is also specified, we can also
         * allocate from the cache.  Neither of the latter two
         * flags may be specified from an interrupt since interrupts
         * are not allowed to mess with the cache queue.
         */
        if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
                pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
        else
                pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

        if (flags & M_ZERO)
                pflags |= VM_ALLOC_ZERO;

        VM_OBJECT_LOCK(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
retry:
                m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

                /*
                 * Ran out of space, free everything up and return.  Don't
                 * need to lock page queues here as we know that the pages we
                 * got aren't on any queues.
                 */
                if (m == NULL) {
                        if ((flags & M_NOWAIT) == 0) {
                                VM_OBJECT_UNLOCK(kmem_object);
                                vm_map_unlock(map);
                                VM_WAIT;
                                vm_map_lock(map);
                                VM_OBJECT_LOCK(kmem_object);
                                goto retry;
                        }
                        /*
                         * Free the pages before removing the map entry.
                         * They are already marked busy.  Calling
                         * vm_map_delete before the pages have been freed or
                         * unbusied will cause a deadlock.
                         */
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object,
                                    OFF_TO_IDX(offset + i));
                                vm_page_lock_queues();
                                vm_page_unwire(m, 0);
                                vm_page_free(m);
                                vm_page_unlock_queues();
                        }
                        VM_OBJECT_UNLOCK(kmem_object);
                        vm_map_delete(map, addr, addr + size);
                        vm_map_unlock(map);
                        return (0);
                }
                if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
                        pmap_zero_page(m);
                vm_page_lock_queues();
                vm_page_flag_clear(m, PG_ZERO);
                m->valid = VM_PAGE_BITS_ALL;
                vm_page_unlock_queues();
        }
        VM_OBJECT_UNLOCK(kmem_object);

        /*
         * Mark map entry as non-pageable.  Assert: vm_map_insert() will
         * never be able to extend the previous entry so there will be a new
         * entry exactly corresponding to this address range and it will
         * have wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count != 0)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count = 1;

        vm_map_simplify_entry(map, entry);

        /*
         * Loop through the pages, entering them in the pmap.  (We cannot
         * add them to the wired count without wrapping the
         * vm_page_queue_lock in splimp...)
         */
        for (i = 0; i < size; i += PAGE_SIZE) {
                VM_OBJECT_LOCK(kmem_object);
                m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
                VM_OBJECT_UNLOCK(kmem_object);
                /*
                 * Because this is kernel_pmap, this call will not block.
                 */
                pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
                vm_page_lock_queues();
                vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
                vm_page_wakeup(m);
                vm_page_unlock_queues();
        }
        vm_map_unlock(map);

        return (addr);
}
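
/*
 * Usage sketch (illustrative only): malloc(9)'s back end is the intended
 * caller, but the flag handling can be exercised directly; an atomic
 * (non-sleeping), zero-filled request against kmem_map would look like
 * this, with 0 meaning either pages or kmem_map KVA ran out.
 *
 *    vm_offset_t va;
 *
 *    va = kmem_malloc(kmem_map, 2 * PAGE_SIZE, M_NOWAIT | M_ZERO);
 *    if (va == 0)
 *        return (0);
 */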

/*
 * kmem_alloc_wait:
 *
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;

        size = round_page(size);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's
                 * lock to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        return (0);
                }
                map->needs_wakeup = TRUE;
                vm_map_unlock_and_wait(map, FALSE);
        }
        vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
            VM_PROT_ALL, 0);
        vm_map_unlock(map);
        return (addr);
}

/*
 * kmem_free_wakeup:
 *
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{

        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        if (map->needs_wakeup) {
                map->needs_wakeup = FALSE;
                vm_map_wakeup(map);
        }
        vm_map_unlock(map);
}
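
/*
 * Usage sketch (illustrative only; "len" is a hypothetical length): the
 * two routines above are paired on a submap such as exec_map.  A thread
 * blocks in kmem_alloc_wait() until another thread returns space with
 * kmem_free_wakeup().
 *
 *    vm_offset_t va;
 *
 *    va = kmem_alloc_wait(exec_map, round_page(len));
 *    ...
 *    kmem_free_wakeup(exec_map, va, round_page(len));
 */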

/*
 * kmem_init:
 *
 * Create the kernel map; insert a mapping covering kernel text,
 * data, bss, and all space allocated thus far (`bootstrap' data).  The
 * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 * `start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
        vm_offset_t start, end;
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        m->system_map = 1;
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
            VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}
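
/*
 * Usage sketch (illustrative only): kmem_init() runs once during startup,
 * before any of the allocators above may be used, along the lines of the
 * call made from vm_mem_init():
 *
 *    kmem_init(virtual_avail, virtual_end);
 */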