/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/aim/slb.c 212715 2010-09-16 00:22:25Z nwhitehorn $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

uma_zone_t slbt_zone;
uma_zone_t slb_cache_zone;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so nine levels, from the root at level 8 down to the leaf at
 * level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The ua_base field must be aligned to the region the node covers:
 * only the bits above the low-order 4 * (level + 1) bits may be set,
 * so esid2base() maps it to itself.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
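
/*
 * Worked example (hypothetical esid, not taken from a real mapping):
 * for esid = 0x123456789,
 *
 *	esid2idx(0x123456789, 8) == 0x1		(root-level nibble)
 *	esid2idx(0x123456789, 3) == 0x6
 *	esid2idx(0x123456789, 0) == 0x9		(leaf-level nibble)
 *
 *	esid2base(0x123456789, 3) == 0x123450000
 *	esid2base(0x123456789, 0) == 0x123456780
 *
 * A leaf node with ua_base == 0x123456780 therefore satisfies
 * uad_baseok() and covers the 16 esids 0x123456780 through
 * 0x12345678f.
 */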

/*
 * Allocate a new leaf node for the specified esid/slbv from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_sync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode *
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_sync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}
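
/*
 * Illustration (hypothetical esids): suppose a level-0 leaf with
 * ua_base 0x123456780 hangs directly off the root and esid
 * 0x123456700 is then inserted.  The two bases first agree at level 1
 * (esid2base() yields 0x123456700 for both), so make_intermediate()
 * creates a level-1 node with ua_base 0x123456700 and links the old
 * leaf at index 8; the caller then adds a new leaf at index 0.
 */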

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, this assumes that
		 * all physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}
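
/*
 * Concretely (hypothetical va): a direct-map address backed by valid
 * physical memory (mem_valid(va, 0) == 0) yields
 * (KERNEL_VSID(va >> ADDR_SR_SHFT) << SLBV_VSID_SHIFT) | SLBV_L;
 * an ordinary kernel virtual address gets the same VSID field
 * without SLBV_L.
 */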

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}
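
/*
 * Lookup sketch (hypothetical tree): with only the leaf from the
 * examples above in place, looking up esid 0x123456789 descends
 * root -> leaf via esid2idx() and returns &leaf->u.slb_entries[9] if
 * that entry has SLBE_VALID set; looking up esid 0x123456600 fails
 * the esid2base() check against the leaf's ua_base and returns NULL.
 */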

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

uint64_t
allocate_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert(pm, pm->pm_slb, slb);

	return (vsid);
}
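
/*
 * Usage sketch: va_to_vsid() above invokes allocate_vsid() with the
 * pmap lock held the first time a user va in a segment is seen; the
 * final slb_insert() pre-spills the new entry into the pmap's SLB
 * cache so the upcoming access will likely find it without another
 * fault.
 */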

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

#define SLB_SPILLABLE(slbe) \
	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
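
/*
 * That is, an SLB cache entry may be evicted ("spilled") only if its
 * ESID lies above the first 16 segments (which map kernel text and
 * stacks) and outside the kernel virtual address range proper; all
 * other kernel entries stay wired.
 */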
void
slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
{
	uint64_t slbe, slbv;
	int i, j, to_spill;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	to_spill = -1;
	slbv = slb_entry->slbv;
	slbe = slb_entry->slbe;

	/* Hunt for a likely candidate */

	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (pm == kernel_pmap && i == USER_SR)
			continue;

		if (!(slbcache[i].slbe & SLBE_VALID)) {
			to_spill = i;
			break;
		}

		if (to_spill < 0 && (pm != kernel_pmap ||
		    SLB_SPILLABLE(slbcache[i].slbe)))
			to_spill = i;
	}

	if (to_spill < 0)
		panic("SLB spill on ESID %#lx, but no available candidates!\n",
		   (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);

	if (slbcache[to_spill].slbe & SLBE_VALID) {
		/* Invalidate this first to avoid races */
		slbcache[to_spill].slbe = 0;
		mb();
	}
	slbcache[to_spill].slbv = slbv;
	slbcache[to_spill].slbe = slbe | (uint64_t)to_spill;

	/* If it is for this CPU, put it in the SLB right away */
	if (pm == kernel_pmap && pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[to_spill].slbv),
		    "r"(slbcache[to_spill].slbe));
	}

	critical_exit();
}
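
/*
 * Design note: the hunt starts at a pseudo-random slot derived from
 * the timebase (mftb() % 64), so repeated spills do not keep evicting
 * the same entry; invalid slots are taken first, and for the kernel
 * pmap only entries that pass SLB_SPILLABLE() may be replaced.
 */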

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}

struct slb *
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb *slb)
{
	uma_zfree(slb_cache_zone, slb);
}
486