slb.c revision 214574
/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/aim/slb.c 214574 2010-10-30 23:07:30Z nwhitehorn $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>

uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);

uma_zone_t slbt_zone;
uma_zone_t slb_cache_zone;

SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

struct slbtnode {
	uint16_t	ua_alloc;
	uint8_t		ua_level;
	/* Only 36 bits needed for full 64-bit address space. */
	uint64_t	ua_base;
	union {
		struct slbtnode	*ua_child[16];
		struct slb	slb_entries[16];
	} u;
};

/*
 * For a full 64-bit address space, there are 36 bits in play in an
 * esid, so 8 levels, with the leaf being at level 0.
 *
 * |3333|3322|2222|2222|1111|1111|11  |    |    | esid
 * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits
 * +----+----+----+----+----+----+----+----+----+--------
 * | 8  | 7  | 6  | 5  | 4  | 3  | 2  | 1  | 0  | level
 */
#define UAD_ROOT_LEVEL	8
#define UAD_LEAF_LEVEL	0
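/*
 * For example, esid 0x123456789 is indexed by nibble 0x1 at the root
 * (level 8), by 0x2 at level 7, and so on down to 0x9 at the leaf
 * (level 0).
 */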
static inline int
esid2idx(uint64_t esid, int level)
{
	int shift;

	shift = level * 4;
	return ((esid >> shift) & 0xF);
}

/*
 * The low 4*(level+1) bits of ua_base must be 0; i.e. only the bits
 * above the node's own region may be set.
 */
#define uad_baseok(ua)				\
	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)

static inline uint64_t
esid2base(uint64_t esid, int level)
{
	uint64_t mask;
	int shift;

	shift = (level + 1) * 4;
	mask = ~((1ULL << shift) - 1);
	return (esid & mask);
}
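/*
 * For example, esid2base(0x123456789, 4) clears the low five nibbles
 * and yields 0x123400000; a level-4 node with that ua_base therefore
 * satisfies uad_baseok().
 */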
/*
 * Allocate a new leaf node for the specified esid/vmhandle from the
 * parent node.
 */
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
	struct slbtnode *child;
	struct slb *retval;
	int idx;

	idx = esid2idx(esid, parent->ua_level);
	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));

	/* unlock and M_WAITOK and loop? */
	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(child != NULL, ("unhandled NULL case"));

	child->ua_level = UAD_LEAF_LEVEL;
	child->ua_base = esid2base(esid, child->ua_level);
	idx = esid2idx(esid, child->ua_level);
	child->u.slb_entries[idx].slbv = slbv;
	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
	setbit(&child->ua_alloc, idx);

	retval = &child->u.slb_entries[idx];

	/*
	 * The above stores must be visible before the next one, so
	 * that a lockless searcher always sees a valid path through
	 * the tree.
	 */
	powerpc_sync();

	idx = esid2idx(esid, parent->ua_level);
	parent->u.ua_child[idx] = child;
	setbit(&parent->ua_alloc, idx);

	return (retval);
}

/*
 * Allocate a new intermediate node to fit between the parent and
 * esid.
 */
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
	struct slbtnode *child, *inter;
	int idx, level;

	idx = esid2idx(esid, parent->ua_level);
	child = parent->u.ua_child[idx];
	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
	    ("No need for an intermediate node?"));

	/*
	 * Find the level where the existing child and our new esid
	 * meet.  It must be lower than parent->ua_level or we would
	 * have chosen a different index in parent.
	 */
	level = child->ua_level + 1;
	while (esid2base(esid, level) !=
	    esid2base(child->ua_base, level))
		level++;
	KASSERT(level < parent->ua_level,
	    ("Found splitting level %d for %09jx and %09jx, "
	    "but it's the same as %p's",
	    level, esid, child->ua_base, parent));

	/* unlock and M_WAITOK and loop? */
	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	KASSERT(inter != NULL, ("unhandled NULL case"));

	/* Set up intermediate node to point to child ... */
	inter->ua_level = level;
	inter->ua_base = esid2base(esid, inter->ua_level);
	idx = esid2idx(child->ua_base, inter->ua_level);
	inter->u.ua_child[idx] = child;
	setbit(&inter->ua_alloc, idx);
	powerpc_sync();

	/* Set up parent to point to intermediate node ... */
	idx = esid2idx(inter->ua_base, parent->ua_level);
	parent->u.ua_child[idx] = inter;
	setbit(&parent->ua_alloc, idx);

	return (inter);
}
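/*
 * Compute the SLBV word (VSID and flags) for a kernel virtual address.
 * Kernel VSIDs are a deterministic function of the address, so no
 * per-pmap state needs to be consulted.
 */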
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
	uint64_t esid, slbv;

	esid = (uintptr_t)va >> ADDR_SR_SHFT;

	/* Set kernel VSID to deterministic value */
	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

	/* Figure out if this is a large-page mapping */
	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
		/*
		 * XXX: If we have set up a direct map, assumes
		 * all physical memory is mapped with large pages.
		 */
		if (mem_valid(va, 0) == 0)
			slbv |= SLBV_L;
	}

	return (slbv);
}

struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
	uint64_t esid = va >> ADDR_SR_SHFT;
	struct slbtnode *ua;
	int idx;

	ua = pm->pm_slb_tree_root;

	for (;;) {
		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
		    ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		/*
		 * This code is specific to ppc64 where a load is
		 * atomic, so no need for atomic_load macro.
		 */
		if (ua->ua_level == UAD_LEAF_LEVEL)
			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
			    &ua->u.slb_entries[idx] : NULL);

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base)
			return (NULL);
	}

	return (NULL);
}

uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	struct slb *entry;

	/* Shortcut kernel case */
	if (pm == kernel_pmap)
		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));

	/*
	 * If there is no vsid for this VA, we need to add a new entry
	 * to the PMAP's segment table.
	 */

	entry = user_va_to_slb_entry(pm, va);

	if (entry == NULL)
		return (allocate_user_vsid(pm,
		    (uintptr_t)va >> ADDR_SR_SHFT, 0));

	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
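/*
 * Allocate a fresh VSID for the given ESID in a user pmap, record it
 * in the pmap's SLB tree (creating leaf and intermediate nodes as
 * needed), and pre-spill the new entry into the pmap's SLB cache.
 * The pmap lock must be held.
 */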
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
	uint64_t vsid, slbv;
	struct slbtnode *ua, *next, *inter;
	struct slb *slb;
	int idx;

	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	vsid = moea64_get_unique_vsid();

	slbv = vsid << SLBV_VSID_SHIFT;
	if (large)
		slbv |= SLBV_L;

	ua = pm->pm_slb_tree_root;

	/* Descend to the correct leaf or NULL pointer. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
		idx = esid2idx(esid, ua->ua_level);

		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = slbv;
			eieio();
			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
			    | SLBE_VALID;
			setbit(&ua->ua_alloc, idx);
			slb = &ua->u.slb_entries[idx];
			break;
		}

		next = ua->u.ua_child[idx];
		if (next == NULL) {
			slb = make_new_leaf(esid, slbv, ua);
			break;
		}

		/*
		 * Check if the next item down has an okay ua_base.
		 * If not, we need to allocate an intermediate node.
		 */
		if (esid2base(esid, next->ua_level) != next->ua_base) {
			inter = make_intermediate(esid, ua);
			slb = make_new_leaf(esid, slbv, inter);
			break;
		}

		ua = next;
	}

	/*
	 * Someone probably wants this soon, and it may be a wired
	 * SLB mapping, so pre-spill this entry.
	 */
	eieio();
	slb_insert_user(pm, slb);

	return (vsid);
}

void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
	struct slbtnode *ua;
	int idx;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	ua = pm->pm_slb_tree_root;
	/* Descend to the correct leaf. */
	for (;;) {
		KASSERT(uad_baseok(ua),
		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));

		idx = esid2idx(esid, ua->ua_level);
		if (ua->ua_level == UAD_LEAF_LEVEL) {
			ua->u.slb_entries[idx].slbv = 0;
			eieio();
			ua->u.slb_entries[idx].slbe = 0;
			clrbit(&ua->ua_alloc, idx);
			return;
		}

		ua = ua->u.ua_child[idx];
		if (ua == NULL ||
		    esid2base(esid, ua->ua_level) != ua->ua_base) {
			/* Perhaps just return instead of assert? */
			KASSERT(0,
			    ("Asked to remove an entry that was never inserted!"));
			return;
		}
	}
}
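/*
 * Recursively free an SLB tree node: descend into any populated
 * children, release the VSIDs still recorded in leaf entries, and
 * return the node itself to the zone.
 */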
static void
free_slb_tree_node(struct slbtnode *ua)
{
	int idx;

	for (idx = 0; idx < 16; idx++) {
		if (ua->ua_level != UAD_LEAF_LEVEL) {
			if (ua->u.ua_child[idx] != NULL)
				free_slb_tree_node(ua->u.ua_child[idx]);
		} else {
			if (ua->u.slb_entries[idx].slbv != 0)
				moea64_release_vsid(ua->u.slb_entries[idx].slbv
				    >> SLBV_VSID_SHIFT);
		}
	}

	uma_zfree(slbt_zone, ua);
}

void
slb_free_tree(pmap_t pm)
{

	free_slb_tree_node(pm->pm_slb_tree_root);
}

struct slbtnode *
slb_alloc_tree(void)
{
	struct slbtnode *root;

	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
	root->ua_level = UAD_ROOT_LEVEL;

	return (root);
}

/* Lock entries mapping kernel text and stacks */

#define SLB_SPILLABLE(slbe) \
	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
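/*
 * slb_insert_kernel() first looks for an invalid (unused) slot in the
 * per-CPU SLB cache; once the cache fills up, it starts at a
 * pseudo-random slot and scans for a spillable entry to replace.
 * Entries whose ESID falls within the first 16 segments or within the
 * kernel virtual address range are never spilled, since they map
 * kernel text and stacks.
 */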
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
	struct slb *slbcache;
	int i, j;

	/* We don't want to be preempted while modifying the kernel map */
	critical_enter();

	slbcache = PCPU_GET(slb);

	/* Check for an unused slot, abusing the user slot as a full flag */
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < USER_SLB_SLOT; i++) {
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == USER_SLB_SLOT)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
		if (i == USER_SLB_SLOT)
			continue;

		if (SLB_SPILLABLE(slbcache[i].slbe))
			break;
	}

	KASSERT(j < 64, ("All kernel SLB slots locked!"));

fillkernslb:
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* If it is for this CPU, put it in the SLB right away */
	if (pmap_bootstrapped) {
		/* slbie not required */
		__asm __volatile ("slbmte %0, %1" ::
		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
	}

	critical_exit();
}

void
slb_insert_user(pmap_t pm, struct slb *slb)
{
	int i;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	if (pm->pm_slb_len < 64) {
		i = pm->pm_slb_len;
		pm->pm_slb_len++;
	} else {
		i = mftb() % 64;
	}

	/* Note that this replacement is atomic with respect to trap_subr */
	pm->pm_slb[i] = slb;
}

static void
slb_zone_init(void *dummy)
{

	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}

struct slb **
slb_alloc_user_cache(void)
{
	return (uma_zalloc(slb_cache_zone, M_ZERO));
}

void
slb_free_user_cache(struct slb **slb)
{
	uma_zfree(slb_cache_zone, slb);
}