/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/powerpc/aim/slb.c 327785 2018-01-10 20:39:26Z markj $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
int n_slbs = 64;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so nine tree levels (8 through 0), with the leaf being at
 * level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    |  esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210|  bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL  8
#define UAD_LEAF_LEVEL  0
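
/*
 * Illustration (hypothetical esid, not taken from the code below): the
 * 36-bit esid 0x123456789 is split into one 4-bit index per level,
 * counted from the right, so level 0 selects nibble 0x9, level 1
 * selects 0x8, and so on up to level 8, which selects 0x1.
 */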

static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}
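
/*
 * For example (values chosen for illustration only):
 * esid2idx(0x123456789, 4) shifts right by 16 bits and masks off the
 * low nibble, yielding 0x5, the level-4 digit of the esid.
 */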

/*
 * The ua_base field should have zeroes in its low 4*(level+1) bits;
 * i.e. only the esid digits above the node's own level may be set.
 */
#define uad_baseok(ua)                          \
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
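
/*
 * Continuing the illustration above: esid2base(0x123456789, 4) clears
 * the low 20 bits (levels 0 through 4), yielding 0x123400000, the base
 * of the level-4 node covering that esid.
 */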

/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_lwsync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
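/*
 * Hypothetical example: if a leaf with ua_base 0x123456780 hangs off
 * the parent and esid 0x123456889 arrives, the two first share a base
 * (0x123456000) at level 2, so a level-2 intermediate node is created
 * there, with the old leaf at index 7 and the new esid at index 8.
 */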
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_lwsync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}

uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t slbv;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, this assumes
		 * that all physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

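/*
 * Look up the SLB entry for a user virtual address, walking the tree
 * lock-free from the root (level 8) toward a leaf.  As an illustration
 * (hypothetical esid, not from the code): for esid 0x123456789 the walk
 * takes index 1 at the root, then the index given by esid2idx() at each
 * node it reaches, until it hits a leaf or a missing/mismatched child.
 */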
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		/*
		 * The following accesses are implicitly ordered under the POWER
		 * ISA by load dependencies (the store ordering is provided by
		 * the powerpc_lwsync() calls elsewhere) and so are run without
		 * barriers.
		 */
		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}

uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		   ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}

static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

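	/*
	 * No unused slot was found: evict a pseudo-random entry, using
	 * the low bits of the timebase as the victim index and skipping
	 * the reserved user slot.
	 */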
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	KASSERT(i != USER_SLB_SLOT,
	    ("Filling user SLB slot with a kernel mapping"));
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < n_slbs) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % n_slbs;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	static vm_offset_t realmax = 0;
	void *va;
	vm_page_t m;

	if (realmax == 0)
		realmax = platform_real_maxaddr();

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc_contig(NULL, 0,
	    malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
	    1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
	if (m == NULL)
		return (NULL);

	va = (void *) VM_PAGE_TO_PHYS(m);

	if (!hw_direct_map)
		pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);

	return (va);
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache",
	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);

	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
	}
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}