/*	$NetBSD: vmalloc.h,v 1.12 2022/02/26 15:57:22 rillig Exp $	*/

/*-
 * Copyright (c) 2013, 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_VMALLOC_H_
#define _LINUX_VMALLOC_H_

#include <uvm/uvm_extern.h>

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/overflow.h>
#include <linux/slab.h>

#include <asm/page.h>

struct notifier_block;

/*
 * XXX vmalloc and kvmalloc both use kmalloc.  If you change that, be
 * sure to update this so kvfree in <linux/mm.h> still works on vmalloc
 * addresses.
 */

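/*
 * is_vmalloc_addr(addr)
 *
 *	True if addr came from vmalloc.  Trivially true in this shim:
 *	vmalloc and kmalloc share one allocator (see XXX above), so
 *	treating every address as a vmalloc address keeps kvfree
 *	correct.
 */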
static inline bool
is_vmalloc_addr(void *addr __unused)
{
	return true;
}

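/*
 * vmalloc(size)
 *
 *	Allocate size bytes of kernel memory, or return NULL on
 *	failure.  Same as kmalloc(size, GFP_KERNEL) in this shim,
 *	which is what keeps kvfree working (see XXX above).
 */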
static inline void *
vmalloc(unsigned long size)
{
	return kmalloc(size, GFP_KERNEL);
}

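/*
 * vmalloc_user(size)
 *
 *	Like vmalloc, but zero the memory.  Linux additionally
 *	promises the result is suitable for mapping into userspace;
 *	zeroing covers the don't-leak-kernel-data part of that
 *	contract.
 */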
static inline void *
vmalloc_user(unsigned long size)
{
	return kzalloc(size, GFP_KERNEL);
}

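/*
 * vzalloc(size)
 *
 *	Like vmalloc, but zero the memory.  Same as kzalloc(size,
 *	GFP_KERNEL) in this shim.
 */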
static inline void *
vzalloc(unsigned long size)
{
	return kzalloc(size, GFP_KERNEL);
}

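/*
 * vfree(ptr)
 *
 *	Free memory allocated with vmalloc, vzalloc, or vmalloc_user.
 *	Same as kfree in this shim.
 */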
static inline void
vfree(void *ptr)
{
	kfree(ptr);
}
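
/*
 * Example (an illustrative sketch, not part of this header; struct
 * foo and n are hypothetical): allocate an array of n elements with
 * an overflow-checked size, assuming <linux/overflow.h> provides
 * array_size() as Linux's does, then free it:
 *
 *	struct foo *fp;
 *
 *	fp = vmalloc(array_size(n, sizeof(*fp)));
 *	if (fp == NULL)
 *		return -ENOMEM;
 *	...
 *	vfree(fp);
 *
 * Because everything here is kmalloc underneath, kvfree(fp) from
 * <linux/mm.h> would work equally well.
 */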

#define	PAGE_KERNEL	UVM_PROT_RW

/*
 * vmap(pages, npages, flags, prot)
 *
 *	Map pages[0], pages[1], ..., pages[npages-1] into contiguous
 *	kernel virtual address space with the specified protection, and
 *	return a KVA pointer to the start.  Return NULL if KVA is not
 *	available; the KVA allocation does not sleep.
 *
 *	prot may be a bitwise ior of UVM_PROT_READ/WRITE/EXEC and
 *	PMAP_* cache flags accepted by pmap_enter().
 *
 *	flags (Linux's VM_* bits) is ignored in this shim.
 */
static inline void *
vmap(struct page **pages, unsigned npages, unsigned long flags __unused,
    pgprot_t protflags)
{
	vm_prot_t justprot = protflags & UVM_PROT_ALL;
	vaddr_t va;
	unsigned i;

	/* Allocate some KVA, or return NULL if we can't.  */
	va = uvm_km_alloc(kernel_map, (vsize_t)npages << PAGE_SHIFT, PAGE_SIZE,
	    UVM_KMF_VAONLY|UVM_KMF_NOWAIT);
	if (va == 0)
		return NULL;

	/* Ask pmap to map the KVA to the specified page addresses.  */
	for (i = 0; i < npages; i++) {
		pmap_kenter_pa(va + i*PAGE_SIZE, page_to_phys(pages[i]),
		    justprot, protflags);
	}

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	return (void *)va;
}

/*
 * vunmap(ptr, npages)
 *
 *	Unmap the KVA pages starting at ptr that were mapped by a call
 *	to vmap with the same npages parameter.
 */
static inline void
vunmap(void *ptr, unsigned npages)
{
	vaddr_t va = (vaddr_t)ptr;

	/* Ask pmap to unmap the KVA.  */
	pmap_kremove(va, (vsize_t)npages << PAGE_SHIFT);

	/* Commit the pmap updates.  */
	pmap_update(pmap_kernel());

	/*
	 * Now that the pmap is no longer mapping the KVA we allocated
	 * on any CPU, it is safe to free the KVA.
	 */
	uvm_km_free(kernel_map, va, (vsize_t)npages << PAGE_SHIFT,
	    UVM_KMF_VAONLY);
}
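
/*
 * Example (an illustrative sketch; pages, npages, and src are
 * hypothetical, and the struct page setup is elided): map loose
 * pages read/write into one contiguous KVA window, use it, and tear
 * it down again:
 *
 *	void *kva;
 *
 *	kva = vmap(pages, npages, 0, PAGE_KERNEL);
 *	if (kva == NULL)
 *		return -ENOMEM;
 *	memcpy(kva, src, (size_t)npages << PAGE_SHIFT);
 *	vunmap(kva, npages);
 *
 * Unlike Linux, where vunmap takes only the address, this shim's
 * vunmap needs the same npages that was passed to vmap, since it
 * does not record the mapping size.
 */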
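/*
 * Vmap purge notifiers
 *
 *	Linux uses these to tell interested drivers when lazily freed
 *	vmap areas are purged.  This shim unmaps eagerly in vunmap, so
 *	there is never a purge event to deliver; registration is a
 *	no-op that always succeeds.
 */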
static inline int
register_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

static inline int
unregister_vmap_purge_notifier(struct notifier_block *nb __unused)
{
	return 0;
}

#endif  /* _LINUX_VMALLOC_H_ */