/*	$NetBSD: uvm_km.c,v 1.58 2002/09/15 16:54:30 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is the size of the kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * range of offsets that is managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
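
/*
 * a minimal sketch of the offset rule above (illustrative only; "kva" is
 * assumed to be an address returned by one of the wired allocators below
 * for kernel_map, and only kernel_object-backed memory can be looked up
 * this way):
 *
 *	voff_t off = kva - vm_map_min(kernel_map);
 *	struct vm_page *pg = uvm_pagelookup(uvm.kernel_object, off);
 *
 * with the numbers from the example, off = 0xf8235000 - 0xf8000000
 * = 0x235000, and pg would be the page mapped at 0xf8235000.
 */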

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.58 2002/09/15 16:54:30 chs Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base &&
	    uvm_map(&kernel_map_store, &base, start - base, NULL,
		    UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
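
/*
 * a rough usage sketch, not taken from any particular caller: boot-time
 * code typically supplies static storage for the submap and lets
 * uvm_km_suballoc() pick the address range.   the names kmembase,
 * kmemlimit, nkmempages and kmem_map_store below are illustrative only.
 *
 *	static struct vm_map kmem_map_store;
 *	vaddr_t kmembase, kmemlimit;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmembase, &kmemlimit,
 *	    (vsize_t)(nkmempages << PAGE_SHIFT), VM_MAP_INTRSAFE,
 *	    FALSE, &kmem_map_store);
 *
 * passing a non-NULL submap lets the caller hand in the map structure
 * itself, which matters for code that runs before malloc() is usable.
 */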

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
				    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) != 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
		    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	      0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			  UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
			!= 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			int t;

			t = uvmexp.active + uvmexp.inactive + uvmexp.free;
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) &&
			     uvmexp.swpgonly == uvmexp.swpages)) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
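
/*
 * usage sketch (approximate; the actual caller is the kernel malloc in
 * kern_malloc.c): wired, interrupt-safe pages come out of kmem_map at
 * splvm, with a NULL object so the pages stay objectless as described
 * in the overview comment.   allocsize and canwait are placeholders for
 * the caller's own values.
 *
 *	int s = splvm();
 *	vaddr_t va = uvm_km_kmemalloc(kmem_map, NULL,
 *	    (vsize_t)round_page(allocsize),
 *	    canwait ? 0 : UVM_KMF_NOWAIT);
 *	splx(s);
 *
 * a zero return means either no VA was available, or (with NOWAIT or
 * CANFAIL) no physical pages could be found.
 */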

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	      UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
					      UVM_INH_NONE, UVM_ADV_RANDOM,
					      0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
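
/*
 * usage sketch ("struct foo" is only a placeholder): a caller that wants
 * a wired, zero-filled, kernel_object backed buffer and can tolerate
 * sleeping might do:
 *
 *	vaddr_t va = uvm_km_alloc1(kernel_map, sizeof(struct foo), TRUE);
 *
 * a zero return means no kernel VA was available; a shortage of physical
 * pages never causes failure here, because the loop above sleeps in
 * uvm_wait() until pages appear.
 */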

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(map, size, align)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
					    UVM_INH_NONE, UVM_ADV_RANDOM,
					    0)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	struct vm_map *map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	for (;;) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}
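
/*
 * usage sketch, loosely modeled on the exec argument handling: reserve
 * pageable, demand-zeroed KVA, let kernel_object fault pages in on first
 * touch, and free with the _wakeup variant so that other threads blocked
 * in uvm_km_valloc_wait() on the same map get another chance.
 *
 *	vaddr_t va = uvm_km_valloc_wait(exec_map, NCARGS);
 *	...
 *	uvm_km_free_wakeup(exec_map, va, NCARGS);
 */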

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}

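/*
 * usage sketch: a pool page backend would pair the two calls on the same
 * map, e.g. (waitok handling is approximate):
 *
 *	vaddr_t va = uvm_km_alloc_poolpage1(kmem_map, NULL, waitok);
 *	...
 *	uvm_km_free_poolpage1(kmem_map, va);
 *
 * when PMAP_MAP_POOLPAGE is defined the map argument is unused (hence
 * the ARGSUSED above); in the fallback case the page really does come
 * out of, and must be returned to, the map that was passed in.
 */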