/*-
 * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Routines for mapping device memory. */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/devmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/vmparam.h>

static const struct devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;

/*
 * The allocated-kva (akva) devmap table and metadata.  Platforms can call
 * devmap_add_entry() to add static device mappings to this table using
 * automatically allocated virtual addresses carved out of the top of kva
 * space.  Allocation begins immediately below DEVMAP_MAX_VADDR (on arm, the
 * ARM_VECTORS_HIGH address).
 */
#define	AKVA_DEVMAP_MAX_ENTRIES	32
static struct devmap_entry	akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int			akva_devmap_idx;
static vm_offset_t		akva_devmap_vaddr = DEVMAP_MAX_VADDR;

#if defined(__aarch64__) || defined(__riscv__)
extern int early_boot;
#endif

/*
 * Print the contents of the static mapping table using the provided printf-like
 * output function (which will be either printf or db_printf).
 */
static void
devmap_dump_table(int (*prfunc)(const char *, ...))
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL || devmap_table[0].pd_size == 0) {
		prfunc("No static device mappings.\n");
		return;
	}

	prfunc("Static device mappings:\n");
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		prfunc("  0x%08jx - 0x%08jx mapped at VA 0x%08jx\n",
		    (uintmax_t)pd->pd_pa,
		    (uintmax_t)(pd->pd_pa + pd->pd_size - 1),
		    (uintmax_t)pd->pd_va);
	}
}

/*
 * Print the contents of the static mapping table.  Used for bootverbose.
 */
void
devmap_print_table(void)
{
	devmap_dump_table(printf);
}

/*
 * Return the "last" kva address used by the registered devmap table.  It's
 * actually the lowest address used by the static mappings, i.e., the address of
 * the first unusable byte of KVA.
 */
vm_offset_t
devmap_lastaddr(void)
{
	const struct devmap_entry *pd;
	vm_offset_t lowaddr;

	if (akva_devmap_idx > 0)
		return (akva_devmap_vaddr);

	lowaddr = DEVMAP_MAX_VADDR;
	for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) {
		if (lowaddr > pd->pd_va)
			lowaddr = pd->pd_va;
	}

	return (lowaddr);
}
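
/*
 * Usage sketch (hypothetical caller, not part of this file): platform
 * startup code can treat the returned value as the ceiling of usable KVA:
 *
 *	vm_offset_t lastaddr;
 *
 *	lastaddr = devmap_lastaddr();
 *	(anything at or above lastaddr is reserved for static mappings)
 */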

/*
 * Add an entry to the internal "akva" static devmap table using the given
 * physical address and size and a virtual address allocated from the top of
 * kva.  This automatically registers the akva table on the first call, so all a
 * platform has to do is call this routine to install as many mappings as it
 * needs and when initarm() calls devmap_bootstrap() it will pick up all the
 * entries in the akva table automatically.
 */
void
devmap_add_entry(vm_paddr_t pa, vm_size_t sz)
{
	struct devmap_entry *m;

	if (devmap_bootstrap_done)
		panic("devmap_add_entry() after devmap_bootstrap()");

	if (akva_devmap_idx == (AKVA_DEVMAP_MAX_ENTRIES - 1))
		panic("AKVA_DEVMAP_MAX_ENTRIES is too small");

	if (akva_devmap_idx == 0)
		devmap_register_table(akva_devmap_entries);

	/*
	 * Allocate virtual address space from the top of kva downwards.  If the
	 * range being mapped is aligned and sized to 1MB boundaries then also
	 * align the virtual address to the next-lower 1MB boundary so that we
	 * end up with a nice efficient section mapping.
	 */
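	/*
	 * For example (illustrative numbers only): pa = 0x48000000 and
	 * sz = 0x00100000 are both 1MB-aligned, so the va allocated below
	 * is rounded down to a 1MB boundary and the whole range can be
	 * mapped with a single L1 section entry instead of 256 page-table
	 * entries.
	 */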
#ifdef __arm__
	if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) {
		akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
	} else
#endif
	{
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);
	}
	m = &akva_devmap_entries[akva_devmap_idx++];
	m->pd_va    = akva_devmap_vaddr;
	m->pd_pa    = pa;
	m->pd_size  = sz;
}
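
/*
 * Minimal usage sketch (the physical addresses and sizes are hypothetical,
 * not taken from any real SoC): platform early-init code adds one entry per
 * device region, and devmap_bootstrap() later maps them all.
 *
 *	devmap_add_entry(0x44e09000, 0x1000);	(UART registers, one page)
 *	devmap_add_entry(0x48000000, 0x100000);	(1MB peripheral block)
 */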

/*
 * Register the given table as the one to use in devmap_bootstrap().
 */
void
devmap_register_table(const struct devmap_entry *table)
{

	devmap_table = table;
}

/*
 * Map all of the static regions in the devmap table, and remember the devmap
 * table so the mapdev, ptov, and vtop functions can do lookups later.
 *
 * If a non-NULL table pointer is given it is used unconditionally, otherwise
 * the previously-registered table is used.  This smooths the transition from
 * legacy code that fills in a local table and passes it to this function, to
 * newer code that calls devmap_register_table() in platform-specific code and
 * then lets the common initarm() call this function with a NULL pointer.
 */
void
devmap_bootstrap(vm_offset_t l1pt, const struct devmap_entry *table)
{
	const struct devmap_entry *pd;

	devmap_bootstrap_done = true;

	/*
	 * If given a table pointer, use it.  Otherwise, if a table was
	 * previously registered, use it.  Otherwise, no work to do.
	 */
	if (table != NULL)
		devmap_table = table;
	else if (devmap_table == NULL)
		return;

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
#if defined(__arm__)
#if __ARM_ARCH >= 6
		pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#else
		pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size,
		    VM_PROT_READ | VM_PROT_WRITE, PTE_DEVICE);
#endif
#elif defined(__aarch64__) || defined(__riscv__)
		pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa);
#endif
	}
}
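
/*
 * Two calling conventions, sketched with a hypothetical table (values are
 * illustrative; initializer order is pd_va, pd_pa, pd_size, and an entry
 * with pd_size == 0 terminates the table):
 *
 *	static const struct devmap_entry mv_devmap[] = {
 *		{ 0xfee00000, 0xd0000000, 0x00100000 },
 *		{ 0, 0, 0 }
 *	};
 *
 * Legacy style passes the table directly:
 *
 *	devmap_bootstrap(l1pagetable, mv_devmap);
 *
 * Newer style registers it and lets common code pass NULL later:
 *
 *	devmap_register_table(mv_devmap);
 *	...
 *	devmap_bootstrap(l1pagetable, NULL);
 */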

/*
 * Look up the given physical address in the static mapping data and return the
 * corresponding virtual address, or NULL if not found.
 */
void *
devmap_ptov(vm_paddr_t pa, vm_size_t size)
{
	const struct devmap_entry *pd;

	if (devmap_table == NULL)
		return (NULL);

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (pa >= pd->pd_pa && pa + size <= pd->pd_pa + pd->pd_size)
			return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
	}

	return (NULL);
}
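
/*
 * Usage sketch (hypothetical addresses): if the table maps physical
 * 0x48000000-0x480fffff, a lookup inside that range reuses the existing
 * static mapping rather than creating a new one:
 *
 *	void *regs;
 *
 *	regs = devmap_ptov(0x48001000, 0x1000);
 *	(regs is pd_va + 0x1000 if such an entry exists, otherwise NULL)
 */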

/*
 * Look up the given virtual address in the static mapping data and return the
 * corresponding physical address, or DEVMAP_PADDR_NOTFOUND if not found.
 */
vm_paddr_t
devmap_vtop(void *vpva, vm_size_t size)
{
	const struct devmap_entry *pd;
	vm_offset_t va;

	if (devmap_table == NULL)
		return (DEVMAP_PADDR_NOTFOUND);

	va = (vm_offset_t)vpva;
	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
		if (va >= pd->pd_va && va + size <= pd->pd_va + pd->pd_size)
			return ((vm_paddr_t)(pd->pd_pa + (va - pd->pd_va)));
	}

	return (DEVMAP_PADDR_NOTFOUND);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.
 *
 * This uses a pre-established static mapping if one exists for the requested
 * range, otherwise it allocates kva space and maps the physical pages into it.
 *
 * This routine is intended to be used for mapping device memory, NOT real
 * memory; the mapping type is inherently VM_MEMATTR_DEVICE in
 * pmap_kenter_device().
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, offset;
	void *rva;

	/* First look in the static mapping table. */
	if ((rva = devmap_ptov(pa, size)) != NULL)
		return (rva);

	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = round_page(size + offset);

#if defined(__aarch64__) || defined(__riscv__)
	if (early_boot) {
		akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
		va = akva_devmap_vaddr;
		KASSERT(va >= VM_MAX_KERNEL_ADDRESS - L2_SIZE,
		    ("Too many early devmap mappings"));
	} else
#endif
		va = kva_alloc(size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pmap_kenter_device(va, size, pa);

	return ((void *)(va + offset));
}
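
/*
 * Usage sketch (the device address is hypothetical): a driver maps its
 * registers, uses them, and unmaps when done.  If the range is covered by a
 * static devmap entry, the matching pmap_unmapdev() call is a no-op.
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0x44e09000, 0x1000);
 *	...
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 */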

/*
 * Unmap device memory and free the kva space.
 */
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t offset;

	/* Nothing to do if we find the mapping in the static table. */
	if (devmap_vtop((void *)va, size) != DEVMAP_PADDR_NOTFOUND)
		return;

	offset = va & PAGE_MASK;
	va = trunc_page(va);
	size = round_page(size + offset);

	pmap_kremove_device(va, size);
	kva_free(va, size);
}

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(devmap, db_show_devmap)
{
	devmap_dump_table(db_printf);
}

#endif /* DDB */