1209975Snwhitehorn/*- 2209975Snwhitehorn * Copyright (c) 2010 Nathan Whitehorn 3209975Snwhitehorn * All rights reserved. 4209975Snwhitehorn * 5209975Snwhitehorn * Redistribution and use in source and binary forms, with or without 6209975Snwhitehorn * modification, are permitted provided that the following conditions 7209975Snwhitehorn * are met: 8209975Snwhitehorn * 9209975Snwhitehorn * 1. Redistributions of source code must retain the above copyright 10209975Snwhitehorn * notice, this list of conditions and the following disclaimer. 11209975Snwhitehorn * 2. Redistributions in binary form must reproduce the above copyright 12209975Snwhitehorn * notice, this list of conditions and the following disclaimer in the 13209975Snwhitehorn * documentation and/or other materials provided with the distribution. 14209975Snwhitehorn * 15209975Snwhitehorn * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16209975Snwhitehorn * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17209975Snwhitehorn * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18209975Snwhitehorn * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19209975Snwhitehorn * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20209975Snwhitehorn * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21209975Snwhitehorn * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22209975Snwhitehorn * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23209975Snwhitehorn * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24209975Snwhitehorn * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25209975Snwhitehorn * 26209975Snwhitehorn * $FreeBSD: releng/10.2/sys/powerpc/aim/slb.c 243040 2012-11-14 20:01:40Z kib $ 27209975Snwhitehorn */ 28209975Snwhitehorn 29209975Snwhitehorn#include <sys/param.h> 30209975Snwhitehorn#include <sys/kernel.h> 31209975Snwhitehorn#include <sys/lock.h> 32243040Skib#include <sys/malloc.h> 33209975Snwhitehorn#include <sys/mutex.h> 34209975Snwhitehorn#include <sys/proc.h> 35209975Snwhitehorn#include <sys/systm.h> 36209975Snwhitehorn 37209975Snwhitehorn#include <vm/vm.h> 38209975Snwhitehorn#include <vm/pmap.h> 39209975Snwhitehorn#include <vm/uma.h> 40215159Snwhitehorn#include <vm/vm.h> 41209975Snwhitehorn#include <vm/vm_map.h> 42215159Snwhitehorn#include <vm/vm_page.h> 43215159Snwhitehorn#include <vm/vm_pageout.h> 44209975Snwhitehorn 45209975Snwhitehorn#include <machine/md_var.h> 46215159Snwhitehorn#include <machine/platform.h> 47209975Snwhitehorn#include <machine/pmap.h> 48209975Snwhitehorn#include <machine/vmparam.h> 49209975Snwhitehorn 50209975Snwhitehornuintptr_t moea64_get_unique_vsid(void); 51209975Snwhitehornvoid moea64_release_vsid(uint64_t vsid); 52212715Snwhitehornstatic void slb_zone_init(void *); 53209975Snwhitehorn 54222620Snwhitehornstatic uma_zone_t slbt_zone; 55222620Snwhitehornstatic uma_zone_t slb_cache_zone; 56222620Snwhitehornint n_slbs = 64; 57212715Snwhitehorn 58212715SnwhitehornSYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL); 59212715Snwhitehorn 60212715Snwhitehornstruct slbtnode { 61212715Snwhitehorn uint16_t ua_alloc; 62212715Snwhitehorn uint8_t ua_level; 63212715Snwhitehorn /* Only 36 bits needed for full 64-bit address space. */ 64212715Snwhitehorn uint64_t ua_base; 65212715Snwhitehorn union { 66212715Snwhitehorn struct slbtnode *ua_child[16]; 67212715Snwhitehorn struct slb slb_entries[16]; 68212715Snwhitehorn } u; 69209975Snwhitehorn}; 70209975Snwhitehorn 71212715Snwhitehorn/* 72212715Snwhitehorn * For a full 64-bit address space, there are 36 bits in play in an 73212715Snwhitehorn * esid, so 8 levels, with the leaf being at level 0. 74212715Snwhitehorn * 75212715Snwhitehorn * |3333|3322|2222|2222|1111|1111|11 | | | esid 76212715Snwhitehorn * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits 77212715Snwhitehorn * +----+----+----+----+----+----+----+----+----+-------- 78212715Snwhitehorn * | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | level 79212715Snwhitehorn */ 80212715Snwhitehorn#define UAD_ROOT_LEVEL 8 81212715Snwhitehorn#define UAD_LEAF_LEVEL 0 82209975Snwhitehorn 83212715Snwhitehornstatic inline int 84212715Snwhitehornesid2idx(uint64_t esid, int level) 85212715Snwhitehorn{ 86212715Snwhitehorn int shift; 87209975Snwhitehorn 88212715Snwhitehorn shift = level * 4; 89212715Snwhitehorn return ((esid >> shift) & 0xF); 90212715Snwhitehorn} 91209975Snwhitehorn 92212715Snwhitehorn/* 93212715Snwhitehorn * The ua_base field should have 0 bits after the first 4*(level+1) 94212715Snwhitehorn * bits; i.e. only 95212715Snwhitehorn */ 96212715Snwhitehorn#define uad_baseok(ua) \ 97212715Snwhitehorn (esid2base(ua->ua_base, ua->ua_level) == ua->ua_base) 98209975Snwhitehorn 99212715Snwhitehorn 100212715Snwhitehornstatic inline uint64_t 101212715Snwhitehornesid2base(uint64_t esid, int level) 102209975Snwhitehorn{ 103212715Snwhitehorn uint64_t mask; 104212715Snwhitehorn int shift; 105209975Snwhitehorn 106212715Snwhitehorn shift = (level + 1) * 4; 107212715Snwhitehorn mask = ~((1ULL << shift) - 1); 108212715Snwhitehorn return (esid & mask); 109212715Snwhitehorn} 110212715Snwhitehorn 111212715Snwhitehorn/* 112212715Snwhitehorn * Allocate a new leaf node for the specified esid/vmhandle from the 113212715Snwhitehorn * parent node. 114212715Snwhitehorn */ 115212715Snwhitehornstatic struct slb * 116212715Snwhitehornmake_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent) 117212715Snwhitehorn{ 118212715Snwhitehorn struct slbtnode *child; 119212715Snwhitehorn struct slb *retval; 120212715Snwhitehorn int idx; 121212715Snwhitehorn 122212715Snwhitehorn idx = esid2idx(esid, parent->ua_level); 123212715Snwhitehorn KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!")); 124212715Snwhitehorn 125212715Snwhitehorn /* unlock and M_WAITOK and loop? */ 126212715Snwhitehorn child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); 127212715Snwhitehorn KASSERT(child != NULL, ("unhandled NULL case")); 128212715Snwhitehorn 129212715Snwhitehorn child->ua_level = UAD_LEAF_LEVEL; 130212715Snwhitehorn child->ua_base = esid2base(esid, child->ua_level); 131212715Snwhitehorn idx = esid2idx(esid, child->ua_level); 132212715Snwhitehorn child->u.slb_entries[idx].slbv = slbv; 133212715Snwhitehorn child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 134212715Snwhitehorn setbit(&child->ua_alloc, idx); 135212715Snwhitehorn 136212715Snwhitehorn retval = &child->u.slb_entries[idx]; 137212715Snwhitehorn 138212715Snwhitehorn /* 139212715Snwhitehorn * The above stores must be visible before the next one, so 140212715Snwhitehorn * that a lockless searcher always sees a valid path through 141212715Snwhitehorn * the tree. 142212715Snwhitehorn */ 143234745Snwhitehorn mb(); 144212715Snwhitehorn 145212715Snwhitehorn idx = esid2idx(esid, parent->ua_level); 146212715Snwhitehorn parent->u.ua_child[idx] = child; 147212715Snwhitehorn setbit(&parent->ua_alloc, idx); 148212715Snwhitehorn 149212715Snwhitehorn return (retval); 150212715Snwhitehorn} 151212715Snwhitehorn 152212715Snwhitehorn/* 153212715Snwhitehorn * Allocate a new intermediate node to fit between the parent and 154212715Snwhitehorn * esid. 155212715Snwhitehorn */ 156212715Snwhitehornstatic struct slbtnode* 157212715Snwhitehornmake_intermediate(uint64_t esid, struct slbtnode *parent) 158212715Snwhitehorn{ 159212715Snwhitehorn struct slbtnode *child, *inter; 160212715Snwhitehorn int idx, level; 161212715Snwhitehorn 162212715Snwhitehorn idx = esid2idx(esid, parent->ua_level); 163212715Snwhitehorn child = parent->u.ua_child[idx]; 164212715Snwhitehorn KASSERT(esid2base(esid, child->ua_level) != child->ua_base, 165212715Snwhitehorn ("No need for an intermediate node?")); 166212715Snwhitehorn 167212715Snwhitehorn /* 168212715Snwhitehorn * Find the level where the existing child and our new esid 169212715Snwhitehorn * meet. It must be lower than parent->ua_level or we would 170212715Snwhitehorn * have chosen a different index in parent. 171212715Snwhitehorn */ 172212715Snwhitehorn level = child->ua_level + 1; 173212715Snwhitehorn while (esid2base(esid, level) != 174212715Snwhitehorn esid2base(child->ua_base, level)) 175212715Snwhitehorn level++; 176212715Snwhitehorn KASSERT(level < parent->ua_level, 177212715Snwhitehorn ("Found splitting level %d for %09jx and %09jx, " 178212715Snwhitehorn "but it's the same as %p's", 179212715Snwhitehorn level, esid, child->ua_base, parent)); 180212715Snwhitehorn 181212715Snwhitehorn /* unlock and M_WAITOK and loop? */ 182212715Snwhitehorn inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); 183212715Snwhitehorn KASSERT(inter != NULL, ("unhandled NULL case")); 184212715Snwhitehorn 185212715Snwhitehorn /* Set up intermediate node to point to child ... */ 186212715Snwhitehorn inter->ua_level = level; 187212715Snwhitehorn inter->ua_base = esid2base(esid, inter->ua_level); 188212715Snwhitehorn idx = esid2idx(child->ua_base, inter->ua_level); 189212715Snwhitehorn inter->u.ua_child[idx] = child; 190212715Snwhitehorn setbit(&inter->ua_alloc, idx); 191234745Snwhitehorn mb(); 192212715Snwhitehorn 193212715Snwhitehorn /* Set up parent to point to intermediate node ... */ 194212715Snwhitehorn idx = esid2idx(inter->ua_base, parent->ua_level); 195212715Snwhitehorn parent->u.ua_child[idx] = inter; 196212715Snwhitehorn setbit(&parent->ua_alloc, idx); 197212715Snwhitehorn 198212715Snwhitehorn return (inter); 199212715Snwhitehorn} 200212715Snwhitehorn 201212715Snwhitehornuint64_t 202212715Snwhitehornkernel_va_to_slbv(vm_offset_t va) 203212715Snwhitehorn{ 204217451Sandreast uint64_t slbv; 205212715Snwhitehorn 206212715Snwhitehorn /* Set kernel VSID to deterministic value */ 207214574Snwhitehorn slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT; 208209975Snwhitehorn 209212715Snwhitehorn /* Figure out if this is a large-page mapping */ 210212715Snwhitehorn if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) { 211212715Snwhitehorn /* 212212715Snwhitehorn * XXX: If we have set up a direct map, assumes 213212715Snwhitehorn * all physical memory is mapped with large pages. 214212715Snwhitehorn */ 215212715Snwhitehorn if (mem_valid(va, 0) == 0) 216212715Snwhitehorn slbv |= SLBV_L; 217209975Snwhitehorn } 218212715Snwhitehorn 219212715Snwhitehorn return (slbv); 220212715Snwhitehorn} 221209975Snwhitehorn 222212715Snwhitehornstruct slb * 223212715Snwhitehornuser_va_to_slb_entry(pmap_t pm, vm_offset_t va) 224212715Snwhitehorn{ 225212715Snwhitehorn uint64_t esid = va >> ADDR_SR_SHFT; 226212715Snwhitehorn struct slbtnode *ua; 227212715Snwhitehorn int idx; 228209975Snwhitehorn 229212715Snwhitehorn ua = pm->pm_slb_tree_root; 230209975Snwhitehorn 231212715Snwhitehorn for (;;) { 232212715Snwhitehorn KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!", 233212715Snwhitehorn ua->ua_base, ua->ua_level)); 234212715Snwhitehorn idx = esid2idx(esid, ua->ua_level); 235209975Snwhitehorn 236212715Snwhitehorn /* 237212715Snwhitehorn * This code is specific to ppc64 where a load is 238212715Snwhitehorn * atomic, so no need for atomic_load macro. 239212715Snwhitehorn */ 240212715Snwhitehorn if (ua->ua_level == UAD_LEAF_LEVEL) 241212715Snwhitehorn return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ? 242212715Snwhitehorn &ua->u.slb_entries[idx] : NULL); 243212715Snwhitehorn 244212715Snwhitehorn ua = ua->u.ua_child[idx]; 245212715Snwhitehorn if (ua == NULL || 246212715Snwhitehorn esid2base(esid, ua->ua_level) != ua->ua_base) 247212715Snwhitehorn return (NULL); 248212715Snwhitehorn } 249212715Snwhitehorn 250212715Snwhitehorn return (NULL); 251209975Snwhitehorn} 252209975Snwhitehorn 253209975Snwhitehornuint64_t 254209975Snwhitehornva_to_vsid(pmap_t pm, vm_offset_t va) 255209975Snwhitehorn{ 256212715Snwhitehorn struct slb *entry; 257209975Snwhitehorn 258209975Snwhitehorn /* Shortcut kernel case */ 259210704Snwhitehorn if (pm == kernel_pmap) 260210704Snwhitehorn return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)); 261209975Snwhitehorn 262209975Snwhitehorn /* 263209975Snwhitehorn * If there is no vsid for this VA, we need to add a new entry 264209975Snwhitehorn * to the PMAP's segment table. 265209975Snwhitehorn */ 266209975Snwhitehorn 267212715Snwhitehorn entry = user_va_to_slb_entry(pm, va); 268212715Snwhitehorn 269212715Snwhitehorn if (entry == NULL) 270212722Snwhitehorn return (allocate_user_vsid(pm, 271212722Snwhitehorn (uintptr_t)va >> ADDR_SR_SHFT, 0)); 272209975Snwhitehorn 273212715Snwhitehorn return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT); 274209975Snwhitehorn} 275209975Snwhitehorn 276209975Snwhitehornuint64_t 277212722Snwhitehornallocate_user_vsid(pmap_t pm, uint64_t esid, int large) 278209975Snwhitehorn{ 279212715Snwhitehorn uint64_t vsid, slbv; 280212715Snwhitehorn struct slbtnode *ua, *next, *inter; 281212715Snwhitehorn struct slb *slb; 282212715Snwhitehorn int idx; 283209975Snwhitehorn 284212715Snwhitehorn KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID")); 285209975Snwhitehorn 286212715Snwhitehorn PMAP_LOCK_ASSERT(pm, MA_OWNED); 287212715Snwhitehorn vsid = moea64_get_unique_vsid(); 288209975Snwhitehorn 289212715Snwhitehorn slbv = vsid << SLBV_VSID_SHIFT; 290212715Snwhitehorn if (large) 291212715Snwhitehorn slbv |= SLBV_L; 292209975Snwhitehorn 293212715Snwhitehorn ua = pm->pm_slb_tree_root; 294209975Snwhitehorn 295212715Snwhitehorn /* Descend to the correct leaf or NULL pointer. */ 296212715Snwhitehorn for (;;) { 297212715Snwhitehorn KASSERT(uad_baseok(ua), 298212715Snwhitehorn ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level)); 299212715Snwhitehorn idx = esid2idx(esid, ua->ua_level); 300209975Snwhitehorn 301212715Snwhitehorn if (ua->ua_level == UAD_LEAF_LEVEL) { 302212715Snwhitehorn ua->u.slb_entries[idx].slbv = slbv; 303212715Snwhitehorn eieio(); 304212715Snwhitehorn ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) 305212715Snwhitehorn | SLBE_VALID; 306212715Snwhitehorn setbit(&ua->ua_alloc, idx); 307212715Snwhitehorn slb = &ua->u.slb_entries[idx]; 308212715Snwhitehorn break; 309212715Snwhitehorn } 310209975Snwhitehorn 311212715Snwhitehorn next = ua->u.ua_child[idx]; 312212715Snwhitehorn if (next == NULL) { 313212715Snwhitehorn slb = make_new_leaf(esid, slbv, ua); 314212715Snwhitehorn break; 315212715Snwhitehorn } 316212715Snwhitehorn 317212715Snwhitehorn /* 318212715Snwhitehorn * Check if the next item down has an okay ua_base. 319212715Snwhitehorn * If not, we need to allocate an intermediate node. 320212715Snwhitehorn */ 321212715Snwhitehorn if (esid2base(esid, next->ua_level) != next->ua_base) { 322212715Snwhitehorn inter = make_intermediate(esid, ua); 323212715Snwhitehorn slb = make_new_leaf(esid, slbv, inter); 324212715Snwhitehorn break; 325212715Snwhitehorn } 326212715Snwhitehorn 327212715Snwhitehorn ua = next; 328209975Snwhitehorn } 329209975Snwhitehorn 330209975Snwhitehorn /* 331209975Snwhitehorn * Someone probably wants this soon, and it may be a wired 332209975Snwhitehorn * SLB mapping, so pre-spill this entry. 333209975Snwhitehorn */ 334212715Snwhitehorn eieio(); 335212722Snwhitehorn slb_insert_user(pm, slb); 336209975Snwhitehorn 337209975Snwhitehorn return (vsid); 338209975Snwhitehorn} 339209975Snwhitehorn 340212715Snwhitehornvoid 341212715Snwhitehornfree_vsid(pmap_t pm, uint64_t esid, int large) 342212715Snwhitehorn{ 343212715Snwhitehorn struct slbtnode *ua; 344212715Snwhitehorn int idx; 345212715Snwhitehorn 346212715Snwhitehorn PMAP_LOCK_ASSERT(pm, MA_OWNED); 347212715Snwhitehorn 348212715Snwhitehorn ua = pm->pm_slb_tree_root; 349212715Snwhitehorn /* Descend to the correct leaf. */ 350212715Snwhitehorn for (;;) { 351212715Snwhitehorn KASSERT(uad_baseok(ua), 352212715Snwhitehorn ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level)); 353212715Snwhitehorn 354212715Snwhitehorn idx = esid2idx(esid, ua->ua_level); 355212715Snwhitehorn if (ua->ua_level == UAD_LEAF_LEVEL) { 356212715Snwhitehorn ua->u.slb_entries[idx].slbv = 0; 357212715Snwhitehorn eieio(); 358212715Snwhitehorn ua->u.slb_entries[idx].slbe = 0; 359212715Snwhitehorn clrbit(&ua->ua_alloc, idx); 360212715Snwhitehorn return; 361212715Snwhitehorn } 362212715Snwhitehorn 363212715Snwhitehorn ua = ua->u.ua_child[idx]; 364212715Snwhitehorn if (ua == NULL || 365212715Snwhitehorn esid2base(esid, ua->ua_level) != ua->ua_base) { 366212715Snwhitehorn /* Perhaps just return instead of assert? */ 367212715Snwhitehorn KASSERT(0, 368212715Snwhitehorn ("Asked to remove an entry that was never inserted!")); 369212715Snwhitehorn return; 370212715Snwhitehorn } 371212715Snwhitehorn } 372212715Snwhitehorn} 373212715Snwhitehorn 374212715Snwhitehornstatic void 375212715Snwhitehornfree_slb_tree_node(struct slbtnode *ua) 376212715Snwhitehorn{ 377212715Snwhitehorn int idx; 378212715Snwhitehorn 379212715Snwhitehorn for (idx = 0; idx < 16; idx++) { 380212715Snwhitehorn if (ua->ua_level != UAD_LEAF_LEVEL) { 381212715Snwhitehorn if (ua->u.ua_child[idx] != NULL) 382212715Snwhitehorn free_slb_tree_node(ua->u.ua_child[idx]); 383212715Snwhitehorn } else { 384212715Snwhitehorn if (ua->u.slb_entries[idx].slbv != 0) 385212715Snwhitehorn moea64_release_vsid(ua->u.slb_entries[idx].slbv 386212715Snwhitehorn >> SLBV_VSID_SHIFT); 387212715Snwhitehorn } 388212715Snwhitehorn } 389212715Snwhitehorn 390212715Snwhitehorn uma_zfree(slbt_zone, ua); 391212715Snwhitehorn} 392212715Snwhitehorn 393212715Snwhitehornvoid 394212715Snwhitehornslb_free_tree(pmap_t pm) 395212715Snwhitehorn{ 396212715Snwhitehorn 397212715Snwhitehorn free_slb_tree_node(pm->pm_slb_tree_root); 398212715Snwhitehorn} 399212715Snwhitehorn 400212715Snwhitehornstruct slbtnode * 401212715Snwhitehornslb_alloc_tree(void) 402212715Snwhitehorn{ 403212715Snwhitehorn struct slbtnode *root; 404212715Snwhitehorn 405212715Snwhitehorn root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); 406212715Snwhitehorn root->ua_level = UAD_ROOT_LEVEL; 407212715Snwhitehorn 408212715Snwhitehorn return (root); 409212715Snwhitehorn} 410212715Snwhitehorn 411209975Snwhitehorn/* Lock entries mapping kernel text and stacks */ 412209975Snwhitehorn 413209975Snwhitehornvoid 414212722Snwhitehornslb_insert_kernel(uint64_t slbe, uint64_t slbv) 415209975Snwhitehorn{ 416212722Snwhitehorn struct slb *slbcache; 417230123Snwhitehorn int i; 418209975Snwhitehorn 419209975Snwhitehorn /* We don't want to be preempted while modifying the kernel map */ 420209975Snwhitehorn critical_enter(); 421209975Snwhitehorn 422212722Snwhitehorn slbcache = PCPU_GET(slb); 423209975Snwhitehorn 424214574Snwhitehorn /* Check for an unused slot, abusing the user slot as a full flag */ 425214574Snwhitehorn if (slbcache[USER_SLB_SLOT].slbe == 0) { 426222620Snwhitehorn for (i = 0; i < n_slbs; i++) { 427222620Snwhitehorn if (i == USER_SLB_SLOT) 428222620Snwhitehorn continue; 429212722Snwhitehorn if (!(slbcache[i].slbe & SLBE_VALID)) 430212722Snwhitehorn goto fillkernslb; 431212722Snwhitehorn } 432209975Snwhitehorn 433222620Snwhitehorn if (i == n_slbs) 434214574Snwhitehorn slbcache[USER_SLB_SLOT].slbe = 1; 435212722Snwhitehorn } 436212722Snwhitehorn 437230123Snwhitehorn i = mftb() % n_slbs; 438230123Snwhitehorn if (i == USER_SLB_SLOT) 439230123Snwhitehorn i = (i+1) % n_slbs; 440209975Snwhitehorn 441212722Snwhitehornfillkernslb: 442222620Snwhitehorn KASSERT(i != USER_SLB_SLOT, 443222620Snwhitehorn ("Filling user SLB slot with a kernel mapping")); 444212722Snwhitehorn slbcache[i].slbv = slbv; 445212722Snwhitehorn slbcache[i].slbe = slbe | (uint64_t)i; 446209975Snwhitehorn 447209975Snwhitehorn /* If it is for this CPU, put it in the SLB right away */ 448212722Snwhitehorn if (pmap_bootstrapped) { 449209975Snwhitehorn /* slbie not required */ 450209975Snwhitehorn __asm __volatile ("slbmte %0, %1" :: 451212722Snwhitehorn "r"(slbcache[i].slbv), "r"(slbcache[i].slbe)); 452209975Snwhitehorn } 453209975Snwhitehorn 454209975Snwhitehorn critical_exit(); 455209975Snwhitehorn} 456209975Snwhitehorn 457212722Snwhitehornvoid 458212722Snwhitehornslb_insert_user(pmap_t pm, struct slb *slb) 459212722Snwhitehorn{ 460212722Snwhitehorn int i; 461209975Snwhitehorn 462212722Snwhitehorn PMAP_LOCK_ASSERT(pm, MA_OWNED); 463212722Snwhitehorn 464222620Snwhitehorn if (pm->pm_slb_len < n_slbs) { 465212722Snwhitehorn i = pm->pm_slb_len; 466212722Snwhitehorn pm->pm_slb_len++; 467212722Snwhitehorn } else { 468222620Snwhitehorn i = mftb() % n_slbs; 469212722Snwhitehorn } 470212722Snwhitehorn 471212722Snwhitehorn /* Note that this replacement is atomic with respect to trap_subr */ 472212722Snwhitehorn pm->pm_slb[i] = slb; 473212722Snwhitehorn} 474212722Snwhitehorn 475215159Snwhitehornstatic void * 476215159Snwhitehornslb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 477215159Snwhitehorn{ 478215159Snwhitehorn static vm_offset_t realmax = 0; 479215159Snwhitehorn void *va; 480215159Snwhitehorn vm_page_t m; 481227568Salc int pflags; 482215159Snwhitehorn 483215159Snwhitehorn if (realmax == 0) 484215159Snwhitehorn realmax = platform_real_maxaddr(); 485215159Snwhitehorn 486215159Snwhitehorn *flags = UMA_SLAB_PRIV; 487243040Skib pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED; 488215159Snwhitehorn 489215159Snwhitehorn for (;;) { 490227568Salc m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax, 491227568Salc PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT); 492215159Snwhitehorn if (m == NULL) { 493215159Snwhitehorn if (wait & M_NOWAIT) 494215159Snwhitehorn return (NULL); 495215159Snwhitehorn VM_WAIT; 496215159Snwhitehorn } else 497215159Snwhitehorn break; 498215159Snwhitehorn } 499215159Snwhitehorn 500215159Snwhitehorn va = (void *) VM_PAGE_TO_PHYS(m); 501215159Snwhitehorn 502215159Snwhitehorn if (!hw_direct_map) 503215159Snwhitehorn pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m)); 504215159Snwhitehorn 505215159Snwhitehorn if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 506215159Snwhitehorn bzero(va, PAGE_SIZE); 507215159Snwhitehorn 508215159Snwhitehorn return (va); 509215159Snwhitehorn} 510215159Snwhitehorn 511209975Snwhitehornstatic void 512209975Snwhitehornslb_zone_init(void *dummy) 513209975Snwhitehorn{ 514209975Snwhitehorn 515212715Snwhitehorn slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode), 516209975Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); 517222620Snwhitehorn slb_cache_zone = uma_zcreate("SLB cache", 518222620Snwhitehorn (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL, 519222620Snwhitehorn UMA_ALIGN_PTR, UMA_ZONE_VM); 520215159Snwhitehorn 521215159Snwhitehorn if (platform_real_maxaddr() != VM_MAX_ADDRESS) { 522215159Snwhitehorn uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc); 523215159Snwhitehorn uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc); 524215159Snwhitehorn } 525209975Snwhitehorn} 526209975Snwhitehorn 527212722Snwhitehornstruct slb ** 528209975Snwhitehornslb_alloc_user_cache(void) 529209975Snwhitehorn{ 530209975Snwhitehorn return (uma_zalloc(slb_cache_zone, M_ZERO)); 531209975Snwhitehorn} 532209975Snwhitehorn 533209975Snwhitehornvoid 534212722Snwhitehornslb_free_user_cache(struct slb **slb) 535209975Snwhitehorn{ 536209975Snwhitehorn uma_zfree(slb_cache_zone, slb); 537209975Snwhitehorn} 538