slb.c revision 210704
/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/aim/slb.c 210704 2010-07-31 21:35:15Z nwhitehorn $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/tree.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);

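/*
 * One mapped segment (an ESID/VSID pair in 'slb'), kept in a per-pmap
 * splay tree keyed on the ESID.
 */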
struct slbcontainer {
	struct slb slb;
	SPLAY_ENTRY(slbcontainer) slb_node;
};

static int slb_compare(struct slbcontainer *a, struct slbcontainer *b);
static void slb_zone_init(void *);

SPLAY_PROTOTYPE(slb_tree, slbcontainer, slb_node, slb_compare);
SPLAY_GENERATE(slb_tree, slbcontainer, slb_node, slb_compare);

uma_zone_t slb_zone;
uma_zone_t slb_cache_zone;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

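/*
 * Find the SLB entry covering 'va'.  Kernel entries are synthesized from
 * the deterministic kernel VSID; user entries are looked up in the pmap's
 * segment tree.  Returns 0 and fills in '*slb' on success, or -1 if the
 * pmap has no mapping for this segment.
 */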
int
va_to_slb_entry(pmap_t pm, vm_offset_t va, struct slb *slb)
{
	struct slbcontainer cont, *found;
	uint64_t esid;

	esid = (uintptr_t)va >> ADDR_SR_SHFT;
	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	if (pm == kernel_pmap) {
		/* Set kernel VSID to deterministic value */
		slb->slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;

		/* Figure out if this is a large-page mapping */
		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
			/*
			 * XXX: If we have set up a direct map, assume
			 * all physical memory is mapped with large pages.
			 */
			if (mem_valid(va, 0) == 0)
				slb->slbv |= SLBV_L;
		}

		return (0);
	}

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	cont.slb.slbe = slb->slbe;
	found = SPLAY_FIND(slb_tree, &pm->pm_slbtree, &cont);

	if (found == NULL)
		return (-1);

	slb->slbv = found->slb.slbv;
	return (0);
}

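/*
 * Return the VSID corresponding to 'va', allocating a new segment
 * mapping if the pmap does not yet have one for this ESID.
 */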
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	if (va_to_slb_entry(pm, va, &entry) != 0)
		return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

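/*
 * Set up a new segment mapping for 'esid' in 'pm' and return its VSID.
 * Kernel ESIDs map to their deterministic VSID; user mappings get a fresh
 * VSID and are inserted into the pmap's segment tree.  The new entry is
 * also pre-spilled into the SLB cache so it is usable immediately.
 */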
uint64_t
allocate_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid;
	struct slbcontainer *slb_entry, kern_entry;
	struct slb *prespill;

	prespill = NULL;

	if (pm == kernel_pmap) {
		vsid = va_to_vsid(pm, esid << ADDR_SR_SHFT);
		slb_entry = &kern_entry;
		prespill = PCPU_GET(slb);
	} else {
		vsid = moea64_get_unique_vsid();
		slb_entry = uma_zalloc(slb_zone, M_NOWAIT);

		if (slb_entry == NULL)
			panic("Could not allocate SLB mapping!");

		prespill = pm->pm_slb;
	}

	slb_entry->slb.slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	slb_entry->slb.slbv = vsid << SLBV_VSID_SHIFT;

	if (large)
		slb_entry->slb.slbv |= SLBV_L;

	if (pm != kernel_pmap) {
		PMAP_LOCK_ASSERT(pm, MA_OWNED);
		SPLAY_INSERT(slb_tree, &pm->pm_slbtree, slb_entry);
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	if (prespill != NULL)
		slb_insert(pm, prespill, &slb_entry->slb);

	return (vsid);
}

/* Lock entries mapping kernel text and stacks */

#define SLB_SPILLABLE(slbe) \
	((((slbe) & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
	    ((slbe) & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
	    ((slbe) & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)

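/*
 * Place 'slb_entry' into the SLB cache 'slbcache', evicting a spillable
 * entry (chosen starting from a pseudo-random index) if no slot is free.
 * For the kernel pmap, entries covering the kernel text and stacks (see
 * SLB_SPILLABLE above) are never evicted, and the new entry is also loaded
 * into the hardware SLB once the pmap is bootstrapped.
 */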
void
slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
{
	uint64_t slbe, slbv;
	int i, j, to_spill;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	to_spill = -1;
	slbv = slb_entry->slbv;
	slbe = slb_entry->slbe;

	/* Hunt for a likely candidate */

	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (pm == kernel_pmap && i == USER_SR)
			continue;

		if (!(slbcache[i].slbe & SLBE_VALID)) {
			to_spill = i;
			break;
		}

		if (to_spill < 0 && (pm != kernel_pmap ||
		    SLB_SPILLABLE(slbcache[i].slbe)))
			to_spill = i;
	}

	if (to_spill < 0)
		panic("SLB spill on ESID %#lx, but no available candidates!\n",
		    (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);

	if (slbcache[to_spill].slbe & SLBE_VALID) {
		/* Invalidate this first to avoid races */
		slbcache[to_spill].slbe = 0;
		mb();
	}
	slbcache[to_spill].slbv = slbv;
	slbcache[to_spill].slbe = slbe | (uint64_t)to_spill;

	/* If it is for this CPU, put it in the SLB right away */
	if (pm == kernel_pmap && pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[to_spill].slbv),
		    "r"(slbcache[to_spill].slbe));
	}

	critical_exit();
}

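/*
 * Reverse lookup: find the ESID mapped by 'vsid' in a user pmap.
 * Returns 0 and sets '*esid' on success, or -1 if the VSID is not in use.
 */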
int
vsid_to_esid(pmap_t pm, uint64_t vsid, uint64_t *esid)
{
	uint64_t slbv;
	struct slbcontainer *entry;

#ifdef INVARIANTS
	if (pm == kernel_pmap)
		panic("vsid_to_esid only works on user pmaps");

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
#endif

	slbv = vsid << SLBV_VSID_SHIFT;

	SPLAY_FOREACH(entry, slb_tree, &pm->pm_slbtree) {
		if (slbv == entry->slb.slbv) {
			*esid = entry->slb.slbe >> SLBE_ESID_SHIFT;
			return (0);
		}
	}

	return (-1);
}

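/*
 * Release every VSID allocated to 'pm' and free its segment tree entries.
 */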
void
free_vsids(pmap_t pm)
{
	struct slbcontainer *entry;

	while (!SPLAY_EMPTY(&pm->pm_slbtree)) {
		entry = SPLAY_MIN(slb_tree, &pm->pm_slbtree);

		SPLAY_REMOVE(slb_tree, &pm->pm_slbtree, entry);

		moea64_release_vsid(entry->slb.slbv >> SLBV_VSID_SHIFT);
		uma_zfree(slb_zone, entry);
	}
}

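/* Splay tree comparison routine, ordering entries by ESID. */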
static int
slb_compare(struct slbcontainer *a, struct slbcontainer *b)
{
	if (a->slb.slbe == b->slb.slbe)
		return (0);
	else if (a->slb.slbe < b->slb.slbe)
		return (-1);
	else
		return (1);
}

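/* Create the UMA zones backing segment tree entries and SLB caches. */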
static void
slb_zone_init(void *dummy)
{

	slb_zone = uma_zcreate("SLB segment", sizeof(struct slbcontainer),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}

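/* Allocate a zeroed 64-entry SLB cache for a user pmap. */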
struct slb *
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

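/* Return a user SLB cache to its zone. */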
void
slb_free_user_cache(struct slb *slb)
{
	uma_zfree(slb_cache_zone, slb);
}
297