/*-
 * Copyright (c) 2014 Ian Lepore <ian@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/arm/arm/physmem.c 278730 2015-02-13 23:30:48Z ian $");

#include "opt_ddb.h"

/*
 * Routines for describing and initializing anything related to physical memory.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <machine/md_var.h>
#include <machine/physmem.h>

/*
 * These structures are used internally to keep track of regions of physical
 * ram, and regions within the physical ram that need to be excluded.  An
 * exclusion region can be excluded from crash dumps, from the vm pool of pages
 * that can be allocated, or both, depending on the exclusion flags associated
 * with the region.
 */
#define	MAX_HWCNT	10
#define	MAX_EXCNT	10

struct region {
	vm_paddr_t	addr;
	vm_size_t	size;
	uint32_t	flags;
};

static struct region hwregions[MAX_HWCNT];
static struct region exregions[MAX_EXCNT];

static size_t hwcnt;
static size_t excnt;

/*
 * These "avail lists" are globals used to communicate physical memory layout to
 * other parts of the kernel.  Within the arrays, each value is the starting
 * address of a contiguous area of physical address space.  The values at even
 * indexes are areas that contain usable memory and the values at odd indexes
 * are areas that aren't usable.  Each list is terminated by a pair of zero
 * entries.
 *
 * dump_avail tells the dump code what regions to include in a crash dump, and
 * phys_avail is the way we hand all the remaining physical ram we haven't used
 * in early kernel init over to the vm system for allocation management.
 *
 * We size these arrays to hold twice as many available regions as we allow for
 * hardware memory regions, to allow for the fact that exclusions can split a
 * hardware region into two or more available regions.  In the real world there
 * will typically be one or two hardware regions and two or three exclusions.
 *
 * Each available region in this list occupies two array slots (the start of the
 * available region and the start of the unavailable region that follows it).
 */
#define	MAX_AVAIL_REGIONS	(MAX_HWCNT * 2)
#define	MAX_AVAIL_ENTRIES	(MAX_AVAIL_REGIONS * 2)

vm_paddr_t phys_avail[MAX_AVAIL_ENTRIES + 2]; /* +2 to allow for a pair  */
vm_paddr_t dump_avail[MAX_AVAIL_ENTRIES + 2]; /* of zeroes to terminate. */

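/*
 * For example (hypothetical values), a board with a single 512 MB bank of ram
 * at 0x80000000 and no allocation exclusions would end up with
 *
 *	phys_avail[] = { 0x80000000, 0xa0000000, 0, 0 };
 *
 * one start/end pair describing the usable area, followed by the terminating
 * pair of zeroes.
 */
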
/*
 * realmem is the total number of hardware pages, excluded or not.
 * Maxmem is one greater than the last physical page number.
 */
long realmem;
long Maxmem;

/* The address at which the kernel was loaded.  Set early in initarm(). */
vm_paddr_t arm_physmem_kernaddr;

/*
 * Print the contents of the physical and excluded region tables using the
 * provided printf-like output function (which will be either printf or
 * db_printf).
 */
static void
physmem_dump_tables(int (*prfunc)(const char *, ...))
{
	int flags, i;
	uintmax_t addr, size;
	const unsigned int mbyte = 1024 * 1024;

	prfunc("Physical memory chunk(s):\n");
	for (i = 0; i < hwcnt; ++i) {
		addr = hwregions[i].addr;
		size = hwregions[i].size;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages)\n", addr,
		    addr + size - 1, size / mbyte, size / PAGE_SIZE);
	}

	prfunc("Excluded memory regions:\n");
	for (i = 0; i < excnt; ++i) {
		addr  = exregions[i].addr;
		size  = exregions[i].size;
		flags = exregions[i].flags;
		prfunc("  0x%08jx - 0x%08jx, %5ju MB (%7ju pages) %s %s\n",
		    addr, addr + size - 1, size / mbyte, size / PAGE_SIZE,
		    (flags & EXFLAG_NOALLOC) ? "NoAlloc" : "",
		    (flags & EXFLAG_NODUMP)  ? "NoDump" : "");
	}

#ifdef DEBUG
	prfunc("Avail lists:\n");
	for (i = 0; phys_avail[i] != 0; ++i) {
		prfunc("  phys_avail[%d] 0x%08x\n", i, phys_avail[i]);
	}
	for (i = 0; dump_avail[i] != 0; ++i) {
		prfunc("  dump_avail[%d] 0x%08x\n", i, dump_avail[i]);
	}
#endif
}

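/*
 * With a hypothetical 256 MB region at 0x80000000 and 4 KB pages, the first
 * loop above would print a line resembling
 *
 *	  0x80000000 - 0x8fffffff,   256 MB (  65536 pages)
 */
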
/*
 * Print the contents of the physical and excluded region tables.  Used for
 * bootverbose output.
 */
void
arm_physmem_print_tables()
{

	physmem_dump_tables(printf);
}

/*
 * Walk the list of hardware regions, processing it against the list of
 * exclusions that contain the given exflags, and generating an "avail list".
 *
 * Updates the kernel global 'realmem' with the sum of all pages in hw regions.
 *
 * Returns the number of pages of non-excluded memory added to the avail list.
 */
static size_t
regions_to_avail(vm_paddr_t *avail, uint32_t exflags, long *pavail)
{
	size_t acnt, exi, hwi;
	vm_paddr_t end, start, xend, xstart;
	long availmem;
	const struct region *exp, *hwp;

	realmem = 0;
	availmem = 0;
	acnt = 0;
	for (hwi = 0, hwp = hwregions; hwi < hwcnt; ++hwi, ++hwp) {
		start = hwp->addr;
		end   = hwp->size + start;
		realmem += arm32_btop(end - start);
		for (exi = 0, exp = exregions; exi < excnt; ++exi, ++exp) {
			/*
			 * If the excluded region does not match given flags,
			 * continue checking with the next excluded region.
			 */
			if ((exp->flags & exflags) == 0)
				continue;
			xstart = exp->addr;
			xend   = exp->size + xstart;
			/*
			 * If the excluded region ends before this hw region,
			 * continue checking with the next excluded region.
			 */
			if (xend <= start)
				continue;
			/*
			 * If the excluded region begins after this hw region
			 * we're done because both lists are sorted.
			 */
			if (xstart >= end)
				break;
			/*
			 * If the excluded region completely covers this hw
			 * region, shrink this hw region to zero size.
			 */
			if ((start >= xstart) && (end <= xend)) {
				start = xend;
				end = xend;
				break;
			}
			/*
			 * If the excluded region falls wholly within this hw
			 * region without abutting or overlapping the beginning
			 * or end, create an available entry from the leading
			 * fragment, then adjust the start of this hw region to
			 * the end of the excluded region, and continue checking
			 * the next excluded region because another exclusion
			 * could affect the remainder of this hw region.
			 */
			if ((xstart > start) && (xend < end)) {
				avail[acnt++] = start;
				avail[acnt++] = xstart;
				availmem += arm32_btop(xstart - start);
				start = xend;
				continue;
			}
			/*
			 * We know the excluded region overlaps either the start
			 * or end of this hardware region (but not both), trim
			 * the excluded portion off the appropriate end.
			 */
			if (xstart <= start)
				start = xend;
			else
				end = xstart;
		}
		/*
		 * If the trimming actions above left a non-zero size, create an
		 * available entry for it.
		 */
		if (end > start) {
			avail[acnt++] = start;
			avail[acnt++] = end;
			availmem += arm32_btop(end - start);
		}
		if (acnt >= MAX_AVAIL_ENTRIES)
			panic("Not enough space in the dump/phys_avail arrays");
	}

	if (pavail)
		*pavail = availmem;
	return (acnt);
}

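/*
 * Example of the trimming/splitting above (hypothetical values): a hardware
 * region of 0x80000000-0x9fffffff with one matching exclusion at
 * 0x80100000-0x802fffff hits the "wholly within" case, producing two avail
 * entries, [0x80000000, 0x80100000) and [0x80300000, 0xa0000000).
 */
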
/*
 * Insertion-sort a new entry into a regions list; sorted by start address.
 */
static void
insert_region(struct region *regions, size_t rcnt, vm_paddr_t addr,
    vm_size_t size, uint32_t flags)
{
	size_t i;
	struct region *ep, *rp;

	ep = regions + rcnt;
	for (i = 0, rp = regions; i < rcnt; ++i, ++rp) {
		if (addr < rp->addr) {
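			/* Shift rp and later entries up a slot to make room. */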
			bcopy(rp, rp + 1, (ep - rp) * sizeof(*rp));
			break;
		}
	}
	rp->addr  = addr;
	rp->size  = size;
	rp->flags = flags;
}

/*
 * Add a hardware memory region.
 */
void
arm_physmem_hardware_region(vm_paddr_t pa, vm_size_t sz)
{
	vm_offset_t adj;

	/*
	 * Filter out the page at PA 0x00000000.  The VM can't handle it, as
	 * pmap_extract() == 0 means failure.
	 */
	if (pa == 0) {
		pa  = PAGE_SIZE;
		sz -= PAGE_SIZE;
	}

	/*
	 * Round the starting address up to a page boundary, and truncate the
	 * ending page down to a page boundary.
	 */
	adj = round_page(pa) - pa;
	pa  = round_page(pa);
	sz  = trunc_page(sz - adj);

	if (hwcnt < nitems(hwregions))
		insert_region(hwregions, hwcnt++, pa, sz, 0);
}

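/*
 * A sketch of typical usage (hypothetical values): platform initarm() code
 * calls this once for each RAM range it discovers, e.g. from FDT data or a
 * board-specific table:
 *
 *	arm_physmem_hardware_region(0x80000000, 256 * 1024 * 1024);
 */
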
/*
 * Add an exclusion region.
 */
void
arm_physmem_exclude_region(vm_paddr_t pa, vm_size_t sz, uint32_t exflags)
{
	vm_offset_t adj;

	/*
	 * Truncate the starting address down to a page boundary, and round the
	 * ending page up to a page boundary.
	 */
	adj = pa - trunc_page(pa);
	pa  = trunc_page(pa);
	sz  = round_page(sz + adj);

	if (excnt < nitems(exregions))
		insert_region(exregions, excnt++, pa, sz, exflags);
}

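/*
 * A sketch of typical usage (hypothetical names): early init code excludes
 * memory that is already spoken for, such as the loaded kernel image, from
 * page allocation while still letting it appear in crash dumps:
 *
 *	arm_physmem_exclude_region(kernel_load_pa, kernel_image_size,
 *	    EXFLAG_NOALLOC);
 */
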
/*
 * Process all the regions added earlier into the global avail lists.
 *
 * Updates the kernel global 'physmem' with the number of physical pages
 * available for use (all pages not in any exclusion region).
 *
 * Updates the kernel global 'Maxmem' with the page number one greater than the
 * last page of physical memory in the system.
 */
void
arm_physmem_init_kernel_globals(void)
{
	size_t nextidx;

	regions_to_avail(dump_avail, EXFLAG_NODUMP, NULL);
	nextidx = regions_to_avail(phys_avail, EXFLAG_NOALLOC, &physmem);
	if (nextidx == 0)
		panic("No memory entries in phys_avail");
	Maxmem = atop(phys_avail[nextidx - 1]);
}

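/*
 * Note that the avail lists are computed here from whatever regions have been
 * registered so far; any hardware or exclusion region added after this call
 * would not be reflected in phys_avail or dump_avail.
 */
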
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(physmem, db_show_physmem)
{

	physmem_dump_tables(db_printf);
}

#endif /* DDB */