vmparam.h revision 246554
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vmparam.h     5.9 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 * $FreeBSD: head/sys/sparc64/include/vmparam.h 246554 2013-02-08 15:52:20Z kib $
 */

#ifndef	_MACHINE_VMPARAM_H_
#define	_MACHINE_VMPARAM_H_

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef	DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef	MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef	SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif
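
/*
 * Each of the limits above is wrapped in #ifndef, so a kernel build may
 * override it by defining the macro before this header is processed, for
 * example through a kernel configuration option.  A sketch with a
 * hypothetical 2GB data-size override follows; it is an illustration only
 * and is never compiled.
 */
#if 0
#define	MAXDSIZ		(2UL*1024*1024*1024)	/* hypothetical 2GB override */
#include <machine/vmparam.h>			/* 1GB default above is skipped */
#endif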

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries must be one greater than the number
 * of phys_avail entries because the phys_avail entry that spans the
 * largest physical address that is accessible by ISA DMA is split
 * into two PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		64

/*
 * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated, VM_FREEPOOL_DIRECT is the pool
 * from which physical pages for small UMA objects are allocated, and
 * VM_FREEPOOL_CACHE is the pool to which cached pages are freed.
 */
#define	VM_NFREEPOOL		3
#define	VM_FREEPOOL_CACHE	2
#define	VM_FREEPOOL_DEFAULT	0
#define	VM_FREEPOOL_DIRECT	1

/*
 * Create two free page lists: VM_FREELIST_DEFAULT is for physical
 * pages that are above the largest physical address that is
 * accessible by ISA DMA and VM_FREELIST_ISADMA is for physical pages
 * that are below that address.
 */
#define	VM_NFREELIST		2
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_ISADMA	1

/*
 * An allocation size of 16MB is supported in order to optimize the
 * use of the direct map by UMA.  Specifically, a cache line contains
 * at most four TTEs, collectively mapping 16MB of physical memory.
 * By reducing the number of distinct 16MB "pages" that are used by UMA,
 * the physical memory allocator reduces the likelihood of both 4MB
 * page TLB misses and cache misses caused by 4MB page TLB misses.
 */
#define	VM_NFREEORDER		12
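
/*
 * With the 8KB base page used on sparc64, the largest buddy allocation is
 * thus PAGE_SIZE << (VM_NFREEORDER - 1) = 8KB << 11 = 16MB, matching the
 * comment above.  A sketch of a compile-time check (illustration only;
 * PAGE_SIZE and CTASSERT() come from other headers):
 */
#if 0
CTASSERT((PAGE_SIZE << (VM_NFREEORDER - 1)) == 16 * 1024 * 1024);
#endif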

/*
 * Only one memory domain.
 */
#ifndef VM_NDOMAIN
#define	VM_NDOMAIN		1
#endif

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages.
 */
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	9
#endif
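
/*
 * With 8KB pages, a level 0 reservation therefore spans
 * (1 << VM_LEVEL_0_ORDER) * PAGE_SIZE = 512 * 8KB = 4MB, the same size as
 * the 4MB TLB entries mentioned below.  A sketch of a compile-time check
 * (illustration only):
 */
#if 0
CTASSERT((PAGE_SIZE << VM_LEVEL_0_ORDER) == 4 * 1024 * 1024);
#endif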

/*
 * Address space layout.
 *
 * UltraSPARC I and II implement a 44 bit virtual address space.  The address
 * space is split into 2 regions at each end of the 64 bit address space, with
 * an out of range "hole" in the middle.  UltraSPARC III implements the full
 * 64 bit virtual address space, but we don't really have any use for it and
 * 43 bits of user address space is considered to be "enough", so we ignore it.
 *
 * Upper region:	0xffffffffffffffff
 *			0xfffff80000000000
 *
 * Hole:		0xfffff7ffffffffff
 *			0x0000080000000000
 *
 * Lower region:	0x000007ffffffffff
 *			0x0000000000000000
 *
 * In general we ignore the upper region, and use the lower region as mappable
 * space.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_DIRECT_ADDRESS and VM_MAX_DIRECT_ADDRESS define the start and end
 * of the direct mapped region.  This maps virtual addresses to physical
 * addresses directly using 4mb tlb entries, with the physical address encoded
 * in the lower 43 bits of virtual address.  These mappings are convenient
 * because they do not require page tables, and because they never change they
 * do not require tlb flushes.  However, since these mappings are cacheable,
 * we must ensure that all pages accessed this way are either not double
 * mapped, or that all other mappings have virtual color equal to physical
 * color, in order to avoid creating illegal aliases in the data cache.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.  VM_MIN_KERNEL_ADDRESS is basically
 * arbitrary, a convenient address is chosen which allows both the kernel text
 * and data and the prom's address space to be mapped with 1 4mb tsb page.
 * VM_MAX_KERNEL_ADDRESS is variable, computed at startup time based on the
 * amount of physical memory available.  Each 4mb tsb page provides 1g of
 * virtual address space, with the only practical limit being available
 * physical memory.
 *
 * VM_MIN_PROM_ADDRESS and VM_MAX_PROM_ADDRESS define the start and end of the
 * prom address space.  On startup the prom's mappings are duplicated in the
 * kernel tsb, to allow prom memory to be accessed normally by the kernel.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.  There are some hardware errata about using addresses
 * at the boundary of the va hole, so we allow just under 43 bits of user
 * address space.  Note that the kernel and user address spaces overlap, but
 * this doesn't matter because they use different tlb contexts, and because
 * the kernel address space is not mapped into each process' address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

#define	VM_MIN_DIRECT_ADDRESS	(0xfffff80000000000UL)
#define	VM_MAX_DIRECT_ADDRESS	(VM_MAX_ADDRESS)
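
/*
 * The direct map encodes a physical address in the low 43 bits of the
 * virtual address, with the upper bits taken from VM_MIN_DIRECT_ADDRESS.
 * A minimal sketch of that arithmetic, using hypothetical helper names
 * (the kernel's real conversion macros live in the pmap/tlb headers);
 * illustration only:
 */
#if 0
#define	EXAMPLE_DIRECT_MASK		((1UL << 43) - 1)
#define	EXAMPLE_PHYS_TO_DIRECT(pa)	\
	(VM_MIN_DIRECT_ADDRESS | ((unsigned long)(pa) & EXAMPLE_DIRECT_MASK))
#define	EXAMPLE_DIRECT_TO_PHYS(va)	((unsigned long)(va) & EXAMPLE_DIRECT_MASK)
#endif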

#define	VM_MIN_KERNEL_ADDRESS	(0x00000000c0000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(vm_max_kernel_address)

#define	VM_MIN_PROM_ADDRESS	(0x00000000f0000000UL)
#define	VM_MAX_PROM_ADDRESS	(0x00000000ffffffffUL)

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x000007fe00000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	PROMBASE		(VM_MIN_PROM_ADDRESS)
#define	USRSTACK		(VM_MAX_USER_ADDRESS)

/*
 * Virtual size (bytes) for various kernel submaps.
 */
#ifndef	VM_KMEM_SIZE
#define	VM_KMEM_SIZE		(16*1024*1024)
#endif

/*
 * VM_KMEM_SIZE_SCALE is the number of physical pages per page of kmem_map
 * KVA.  The total KVA space allocated for kmem_map is
 * min(max(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE),
 *     VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX).
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(tsb_kernel_ldd_phys == 0 ? 3 : 2)
#endif

/*
 * Ceiling on amount of kmem_map kva space.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
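
/*
 * A sketch of the sizing rule quoted above, with a hypothetical function and
 * parameter name (the authoritative computation is performed by the VM
 * startup code); illustration only:
 */
#if 0
static u_long
example_kmem_size(u_long physmem_bytes)
{
	u_long size;

	size = ulmax(VM_KMEM_SIZE, physmem_bytes / VM_KMEM_SIZE_SCALE);
#ifdef VM_KMEM_SIZE_MIN
	size = ulmax(size, VM_KMEM_SIZE_MIN);
#endif
	return (ulmin(size, VM_KMEM_SIZE_MAX));
}
#endif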

/*
 * Initial pagein size of beginning of executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#define	UMA_MD_SMALL_ALLOC

extern u_int tsb_kernel_ldd_phys;
extern vm_offset_t vm_max_kernel_address;

/*
 * Older sparc64 machines have a virtually indexed L1 data cache of 16KB.
 * Consequently, mappings of the same physical page at differently colored
 * virtual addresses may have to run with caching disabled, so the zero
 * region is limited to a single page.
 */
#define	ZERO_REGION_SIZE	PAGE_SIZE

#endif /* !_MACHINE_VMPARAM_H_ */