vm_kern.c revision 189015
/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_kern.c 189015 2009-02-24 20:57:43Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* for ticks and hz */
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

vm_map_t kernel_map = 0;
vm_map_t kmem_map = 0;
vm_map_t exec_map = 0;
vm_map_t pipe_map;
vm_map_t buffer_map = 0;

/*
 *	kmem_alloc_nofault:
 *
 *	Allocate a virtual address range with no underlying object and
 *	no initial mapping to physical memory.  Any mapping from this
 *	range to physical memory must be explicitly created prior to
 *	its use, typically with pmap_qenter().  Any attempt to create
 *	a mapping on demand through vm_fault() will result in a panic.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, 0, &addr, size, VMFS_ANY_SPACE,
	    VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
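
/*
 * Illustrative sketch only (not from the original file): a typical caller
 * allocates a no-fault KVA range and backs it explicitly with pmap_qenter().
 * The `pages' array and `npages' count are hypothetical caller-supplied
 * wired pages.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	if (kva == 0)
 *		return (ENOMEM);
 *	pmap_qenter(kva, pages, npages);
 *	... use the mapping ...
 *	pmap_qremove(kva, npages);
 *	kmem_free(kernel_map, kva, npages * PAGE_SIZE);
 */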

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire().  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new page
	 *    for it.
	 * 3) vm_map_wire() calls vm_fault(); there is no page, but there is
	 *    a pager, so we call pager_data_request().  But the kmsg zone is
	 *    empty, so we must kmem_alloc().
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc() is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire() will wire the pages.
	 */
	VM_OBJECT_LOCK(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		mem->valid = VM_PAGE_BITS_ALL;
		KASSERT((mem->flags & PG_UNMANAGED) != 0,
		    ("kmem_alloc: page %p is managed", mem));
	}
	VM_OBJECT_UNLOCK(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, addr, addr + size,
	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);

	return (addr);
}
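
/*
 * Illustrative sketch only: a caller grabs a page of wired, zero-filled
 * kernel memory and later releases it with kmem_free().  The `buf' name
 * is hypothetical.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, PAGE_SIZE);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	... use the wired, zero-filled page at `buf' ...
 *	kmem_free(kernel_map, buf, PAGE_SIZE);
 */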

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	superpage_align	Request that min be superpage aligned
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, boolean_t superpage_align)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
	    VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS)
		panic("kmem_suballoc: bad status return of %d", ret);
	*max = *min + size;
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if (vm_map_submap(parent, *min, *max, result) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
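
/*
 * Illustrative sketch only: this is roughly how a boot-time submap such as
 * exec_map is carved out of kernel_map (cf. vm_ksubmap_init() in
 * vm/vm_init.c); the 16-page sizing shown here is hypothetical.
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE, FALSE);
 */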

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	`map' is ONLY allowed to be kmem_map or one of the mbuf submaps, to
 *	which we never free memory.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int pflags;

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if ((flags & M_NOWAIT) == 0) {
			for (i = 0; i < 8; i++) {
				EVENTHANDLER_INVOKE(vm_lowmem, 0);
				uma_reclaim();
				vm_map_lock(map);
				if (vm_map_findspace(map, vm_map_min(map),
				    size, &addr) == 0) {
					break;
				}
				vm_map_unlock(map);
				tsleep(&i, 0, "nokva", (hz / 4) * (i + 1));
			}
			if (i == 8) {
				panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
				    (long)size, (long)map->size);
			}
		} else {
			return (0);
		}
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	if ((flags & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;

	if (flags & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), pflags);

		/*
		 * Ran out of space, free everything up and return.  We don't
		 * need to lock the page queues here, as we know that the
		 * pages we got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_OBJECT_UNLOCK(kmem_object);
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				VM_OBJECT_LOCK(kmem_object);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete() before the pages have been freed
			 * or unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_lock_queues();
				vm_page_unwire(m, 0);
				vm_page_free(m);
				vm_page_unlock_queues();
			}
			VM_OBJECT_UNLOCK(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		m->valid = VM_PAGE_BITS_ALL;
		KASSERT((m->flags & PG_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
	}
	VM_OBJECT_UNLOCK(kmem_object);

	/*
	 * Mark the map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry, so there will be a new
	 * entry exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	/*
	 * At this point, the kmem_object must be unlocked because
	 * vm_map_simplify_entry() calls vm_object_deallocate(), which
	 * locks the kmem_object.
	 */
	vm_map_simplify_entry(map, entry);

	/*
	 * Loop through the pages, entering them in the pmap.
	 */
	VM_OBJECT_LOCK(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
		    TRUE);
		vm_page_wakeup(m);
	}
	VM_OBJECT_UNLOCK(kmem_object);
	vm_map_unlock(map);

	return (addr);
}
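
/*
 * Illustrative sketch only: kmem_malloc() is the page-supply backend for
 * the kernel malloc layer; a non-sleeping, zeroed allocation would look
 * roughly like this (the `va' and `bytes' names are hypothetical).
 *
 *	vm_offset_t va;
 *	vm_size_t bytes = 2 * PAGE_SIZE;
 *
 *	va = kmem_malloc(kmem_map, bytes, M_NOWAIT | M_ZERO);
 *	if (va == 0)
 *		return (NULL);
 */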

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
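
/*
 * Illustrative sketch only: kmem_alloc_wait() and kmem_free_wakeup() are
 * used in pairs on space-limited submaps such as exec_map, in roughly this
 * pattern (the `args' name and the sizing are hypothetical).
 *
 *	vm_offset_t args;
 *
 *	args = kmem_alloc_wait(exec_map, round_page(ARG_MAX));
 *	if (args == 0)
 *		return (ENOMEM);
 *	... fill in the pageable buffer ...
 *	kmem_free_wakeup(exec_map, args, round_page(ARG_MAX));
 */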

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	m->system_map = 1;
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_ooffset_t) 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
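
/*
 * For reference, and as a sketch of the single call site: kmem_init() is
 * invoked once during startup, from vm_mem_init() in vm/vm_init.c, roughly
 * as
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * with `virtual_avail' and `virtual_end' established earlier by the
 * machine-dependent pmap bootstrap.
 */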

#ifdef DIAGNOSTIC
/*
 * Allow userspace to directly trigger the VM drain routine for testing
 * purposes.
 */
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error)
		return (error);
	if (i)
		EVENTHANDLER_INVOKE(vm_lowmem, 0);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    debug_vm_lowmem, "I", "set to trigger vm_lowmem event");
#endif
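
/*
 * Usage sketch (assuming a kernel built with `options DIAGNOSTIC'):
 * writing a non-zero value runs the vm_lowmem event handlers, e.g.
 *
 *	# sysctl debug.vm_lowmem=1
 */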