/*	$NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-size, sparsely populated uvm_objects.   each kernel
 * object is the size of the kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * range of offsets that is managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The kmem(9) subsystem, and pool(9) unless
 *	created with a custom meta-data allocator, use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use the quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */
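
/*
 * For illustration (a sketch, not code used in this file): consumers such
 * as kmem(9) and pool(9) obtain page-backed kernel memory from the stacked
 * arenas via uvm_km_kmem_alloc() below, roughly
 *
 *	vmem_addr_t va;
 *
 *	if (uvm_km_kmem_alloc(kmem_va_arena, len, VM_SLEEP, &va) == 0) {
 *		... use the wired memory at va ...
 *		uvm_km_kmem_free(kmem_va_arena, va, len);
 *	}
 *
 * where "len" is a hypothetical rounded-up size.  small requests are
 * satisfied from kmem_va_arena's quantum cache; larger ones fall through
 * to kmem_arena.
 */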

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.165 2023/04/09 09:00:56 riastradh Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for the lower and upper bounds on the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>
#include <sys/msan.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

size_t nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
static struct vmem kmem_va_arena_store;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	size_t npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(NKMEMPAGES_MAX_UNLIMITED) && !defined(KMSAN)
	npages = physmem;
#else

#if defined(KMSAN)
	npages = (physmem / 4);
#elif defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

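	/*
	 * Worked example (illustrative only): with 4 KB pages and
	 * physmem == 262144 (1 GB of RAM), the PMAP_MAP_POOLPAGE case
	 * above yields npages = 65536, i.e. a 256 MB kmem_arena, before
	 * the NKMEMPAGES_MIN/NKMEMPAGES_MAX clamps below are applied.
	 */
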
#if !defined(NKMEMPAGES_MAX_UNLIMITED)
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "start=%#jx end=%#jx", start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
				UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
			UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();

	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
	/*
	 * kmem_arena VA allocations happen independently of uvm_map.
	 * grow kernel to accommodate the kmem_arena.
	 */
	if (uvm_maxkaddr < kmembase + kmemsize) {
		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
		    uvm_maxkaddr, kmembase, kmemsize);
	}
#endif

	vmem_subsystem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)",
	    kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva",
	    0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE,
	    VM_NOSLEEP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel map's virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{
	kmem_init();
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
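
/*
 * For example (an illustrative sketch, not code from this file): a
 * subsystem that wants its own submap does roughly what is done to
 * create pager_map:
 *
 *	vaddr_t sva, eva;
 *	struct vm_map *my_map;
 *
 *	my_map = uvm_km_suballoc(kernel_map, &sva, &eva, MY_MAP_SIZE,
 *	    0, false, NULL);
 *
 * here MY_MAP_SIZE and my_map are hypothetical names; on return,
 * [sva, eva) is the KVA range managed by the new submap.
 */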

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm");
			rw_enter(uobj->vmobjlock, RW_WRITER);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_pagefree(pg);
		}
	}
	rw_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta);
	}
}

/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 *    regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		     i < __PGRM_BATCH && va < end;
		     va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL);
			KASSERT(pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
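/*
 * uvm_km_check_empty: debug check that the given KVA range has no pmap
 * mappings and no pages in kernel_object; panics if either is found.
 */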
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	vaddr_t va;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		paddr_t pa;

		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa %#llx",
			    (void *)va, (long long)pa);
		}
		/*
		 * kernel_object should not have pages for the corresponding
		 * region.  check it.
		 *
		 * why trylock?  because:
		 * - caller might not want to block.
		 * - we can recurse when allocating radix_node for
		 *   kernel_object.
		 */
		if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_READER)) {
			struct vm_page *pg;

			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			rw_exit(uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p",
				    (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even when we are allowed to wait, if there is
 *	not enough free VM space in the map... the caller should be
 *	prepared to handle this case.
 * => we return the KVA of the memory allocated
 */
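
/*
 * For example (an illustrative sketch only): a caller that wants one
 * wired, zero-filled page and can tolerate failure might do
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */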

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot, vaprot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=%#jx, obj=%#jx, size=%#jx, flags=%#jx)",
	    (uintptr_t)map, (uintptr_t)obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW;
	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	     | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=%#jx, offset=%#jx", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	if ((flags & UVM_KMF_ZERO) == 0) {
		kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR);
		kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT);
	}

	UVMHIST_LOG(maphist,"<- done (kva=%#jx)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_protect: change the protection of an allocated area
 */

int
uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot)
{
	return uvm_map_protect(map, addr, addr + round_page(size), prot, false);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() relies on the existing
		 * pmap mappings to find the pages to free (and removes those
		 * mappings itself), so it must run before the KVA is released
		 * below.  See comment below about KVA visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

#if defined(PMAP_ALLOC_POOLPAGE) && \
    !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE)
#error Must specify ALLOC with MAP and UNMAP
#endif

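/*
 * uvm_km_kmem_alloc: vmem backend; allocate wired pages mapped at KVA
 * taken from the given arena.  A single-page request takes the pmap's
 * direct-mapped pool-page path (PMAP_MAP_POOLPAGE) when available, which
 * avoids consuming KVA from the arena.
 */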
int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		   0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		KASSERT(va != 0);
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map
	 * so this allocation must not extend beyond the current limit.
	 */
	KASSERTMSG(uvm_maxkaddr >= va + size,
	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
	    uvm_maxkaddr, va, size);
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
		paddr_t pa __diagused;
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

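/*
 * uvm_km_kmem_free: undo uvm_km_kmem_alloc(): free the pages and return
 * the KVA to the given arena (or unmap the direct-mapped pool page for a
 * single-page allocation).
 */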
void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

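/*
 * uvm_km_va_starved_p: return true if less than 10% of the kmem_arena
 * KVA is free, i.e. callers should start reclaiming cached memory.
 *
 * Illustrative use (a sketch, not code from this file): a drain thread
 * might check
 *
 *	if (uvm_km_va_starved_p()) {
 *		... release cached KVA back to the arenas ...
 *	}
 */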
bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}