/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: head/sys/vm/vm_kern.c 21737 1997-01-15 20:46:02Z dg $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

/* The kernel map and the submaps carved out of it at boot time. */
vm_map_t kernel_map=0;		/* the kernel's entire virtual address space */
vm_map_t kmem_map=0;		/* submap backing kmem_malloc() allocations */
vm_map_t exec_map=0;
vm_map_t exech_map=0;
vm_map_t clean_map=0;
vm_map_t u_map=0;
vm_map_t buffer_map=0;		/* submap for file system buffers */
vm_map_t mb_map=0;		/* submap for mbufs and mbuf clusters */
int mb_map_full=0;		/* nonzero once mb_map has been exhausted */
vm_map_t io_map=0;
vm_map_t phys_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
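
/*
 * Example (hypothetical caller, not part of the kernel proper):
 * reserving pageable kernel VA.  The pages are not wired; each one is
 * faulted in on first touch.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 16 * PAGE_SIZE);
 *	if (va == 0)
 *		panic("out of kernel virtual address space");
 */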

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent a
	 * race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		while ((mem = vm_page_alloc(kernel_object,
			OFF_TO_IDX(offset + i), VM_ALLOC_ZERO)) == NULL) {
			VM_WAIT;
		}
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->flags &= ~(PG_BUSY|PG_ZERO);
		mem->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
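
/*
 * Example (hypothetical caller): wiring down a small scratch buffer at
 * boot time.  kmem_alloc can sleep in VM_WAIT, so it must not be used
 * at interrupt level; kmem_malloc below exists for that case.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, 8 * PAGE_SIZE);
 *	if (buf == 0)
 *		panic("no space for scratch buffer");
 */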

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
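
/*
 * Example (continuing the hypothetical sketch above): releasing the
 * buffer.  The same map and size used for the allocation are passed
 * back in.
 *
 *	kmem_free(kernel_map, buf, 8 * PAGE_SIZE);
 */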

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *	pageable	Can the region be paged
 */
vm_map_t
kmem_suballoc(parent, min, max, size, pageable)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
	boolean_t pageable;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
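
/*
 * Example (hypothetical): carving a private submap out of kernel_map,
 * in the style of the submaps declared at the top of this file.  The
 * size is made up; a real caller would compute it from tunables.
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t my_map;
 *
 *	my_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    32 * PAGE_SIZE, FALSE);
 */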

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (waitflag == M_NOWAIT).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, waitflag)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t waitflag;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			log(LOG_ERR, "Out of mbuf clusters - increase maxusers!\n");
			return (0);
		}
		if (waitflag == M_WAITOK)
			panic("kmem_malloc: kmem_map too small");
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
		VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
			(waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if (waitflag == M_WAITOK) {
				VM_WAIT;
				goto retry;
			}
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
					OFF_TO_IDX(offset + i));
				PAGE_WAKEUP(m);
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		m->flags &= ~PG_ZERO;
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap. (We cannot add
	 * them to the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		PAGE_WAKEUP(m);
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
			VM_PROT_ALL, 1);
		m->flags |= PG_MAPPED|PG_WRITEABLE;
	}
	vm_map_unlock(map);

	return (addr);
}
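
/*
 * Example (sketch of the call made by the kernel malloc layer in
 * kern/kern_malloc.c): grabbing wired backing pages without sleeping.
 * allocsize is a placeholder; on failure the caller simply fails the
 * allocation.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(allocsize), M_NOWAIT);
 *	if (va == 0)
 *		return (NULL);
 */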

/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, 0, size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
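
/*
 * Example (hypothetical): the alloc/free pair as a consumer of a
 * submap such as exec_map might use it.  kmem_alloc_wait sleeps until
 * space appears, and kmem_free_wakeup both releases the range and
 * wakes any such sleepers.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, 4 * PAGE_SIZE);
 *	...
 *	kmem_free_wakeup(exec_map, va, 4 * PAGE_SIZE);
 */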

/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
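
/*
 * Example (sketch): the bootstrap call, roughly as machine-dependent
 * startup code makes it once the pmap layer knows the range of
 * available kernel virtual addresses (virtual_avail and virtual_end
 * are the conventional MD names; treat them as placeholders here).
 *
 *	kmem_init(virtual_avail, virtual_end);
 */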