--- slb.c	(r210704)
+++ slb.c	(r212715)
 /*-
  * Copyright (c) 2010 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
--- 9 unchanged lines hidden ---
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: head/sys/powerpc/aim/slb.c 210704 2010-07-31 21:35:15Z nwhitehorn $
+ * $FreeBSD: head/sys/powerpc/aim/slb.c 212715 2010-09-16 00:22:25Z nwhitehorn $
  */
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/systm.h>
-#include <sys/tree.h>
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
 #include <vm/uma.h>
 #include <vm/vm_map.h>
 
 #include <machine/md_var.h>
 #include <machine/pmap.h>
 #include <machine/vmparam.h>
 
 uintptr_t moea64_get_unique_vsid(void);
 void moea64_release_vsid(uint64_t vsid);
+static void slb_zone_init(void *);
 
-struct slbcontainer {
-	struct slb slb;
-	SPLAY_ENTRY(slbcontainer) slb_node;
+uma_zone_t slbt_zone;
+uma_zone_t slb_cache_zone;
+
+SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
+
+struct slbtnode {
+	uint16_t	ua_alloc;
+	uint8_t		ua_level;
+	/* Only 36 bits needed for full 64-bit address space. */
+	uint64_t	ua_base;
+	union {
+		struct slbtnode	*ua_child[16];
+		struct slb	slb_entries[16];
+	} u;
 };
 
-static int slb_compare(struct slbcontainer *a, struct slbcontainer *b);
-static void slb_zone_init(void *);
+/*
+ * For a full 64-bit address space, there are 36 bits in play in an
+ * esid, so 8 levels, with the leaf being at level 0.
+ *
+ * |3333|3322|2222|2222|1111|1111|11  |    |    | esid
+ * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits
+ * +----+----+----+----+----+----+----+----+----+--------
+ * |  8 |  7 |  6 |  5 |  4 |  3 |  2 |  1 |  0 | level
+ */
+#define UAD_ROOT_LEVEL	8
+#define UAD_LEAF_LEVEL	0
 
-SPLAY_PROTOTYPE(slb_tree, slbcontainer, slb_node, slb_compare);
-SPLAY_GENERATE(slb_tree, slbcontainer, slb_node, slb_compare);
+static inline int
+esid2idx(uint64_t esid, int level)
+{
+	int shift;
 
-uma_zone_t slb_zone;
-uma_zone_t slb_cache_zone;
+	shift = level * 4;
+	return ((esid >> shift) & 0xF);
+}
 
-SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
+/*
+ * The ua_base field should have 0 bits in the first 4*(level+1)
+ * bits; i.e. only the prefix digits above the node's own level may
+ * be set.
+ */
+#define uad_baseok(ua)					\
+	(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)
 
-int
-va_to_slb_entry(pmap_t pm, vm_offset_t va, struct slb *slb)
+
+static inline uint64_t
+esid2base(uint64_t esid, int level)
 {
-	struct slbcontainer cont, *found;
-	uint64_t esid;
+	uint64_t mask;
+	int shift;
 
+	shift = (level + 1) * 4;
+	mask = ~((1ULL << shift) - 1);
+	return (esid & mask);
+}
+
+/*
+ * Allocate a new leaf node for the specified esid/vmhandle from the
+ * parent node.
+ */
+static struct slb *
+make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
+{
+	struct slbtnode *child;
+	struct slb *retval;
+	int idx;
+
+	idx = esid2idx(esid, parent->ua_level);
+	KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));
+
+	/* unlock and M_WAITOK and loop? */
+	child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
+	KASSERT(child != NULL, ("unhandled NULL case"));
+
+	child->ua_level = UAD_LEAF_LEVEL;
+	child->ua_base = esid2base(esid, child->ua_level);
+	idx = esid2idx(esid, child->ua_level);
+	child->u.slb_entries[idx].slbv = slbv;
+	child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
+	setbit(&child->ua_alloc, idx);
+
+	retval = &child->u.slb_entries[idx];
+
+	/*
+	 * The above stores must be visible before the next one, so
+	 * that a lockless searcher always sees a valid path through
+	 * the tree.
+	 */
+	powerpc_sync();
+
+	idx = esid2idx(esid, parent->ua_level);
+	parent->u.ua_child[idx] = child;
+	setbit(&parent->ua_alloc, idx);
+
+	return (retval);
+}
+
+/*
+ * Allocate a new intermediate node to fit between the parent and
+ * esid.
+ */
+static struct slbtnode*
+make_intermediate(uint64_t esid, struct slbtnode *parent)
+{
+	struct slbtnode *child, *inter;
+	int idx, level;
+
+	idx = esid2idx(esid, parent->ua_level);
+	child = parent->u.ua_child[idx];
+	KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
+	    ("No need for an intermediate node?"));
+
+	/*
+	 * Find the level where the existing child and our new esid
+	 * meet.  It must be lower than parent->ua_level or we would
+	 * have chosen a different index in parent.
+	 */
+	level = child->ua_level + 1;
+	while (esid2base(esid, level) !=
+	    esid2base(child->ua_base, level))
+		level++;
+	KASSERT(level < parent->ua_level,
+	    ("Found splitting level %d for %09jx and %09jx, "
+	    "but it's the same as %p's",
+	    level, esid, child->ua_base, parent));
+
+	/* unlock and M_WAITOK and loop? */
+	inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
+	KASSERT(inter != NULL, ("unhandled NULL case"));
+
+	/* Set up intermediate node to point to child ... */
+	inter->ua_level = level;
+	inter->ua_base = esid2base(esid, inter->ua_level);
+	idx = esid2idx(child->ua_base, inter->ua_level);
+	inter->u.ua_child[idx] = child;
+	setbit(&inter->ua_alloc, idx);
+	powerpc_sync();
+
+	/* Set up parent to point to intermediate node ... */
+	idx = esid2idx(inter->ua_base, parent->ua_level);
+	parent->u.ua_child[idx] = inter;
+	setbit(&parent->ua_alloc, idx);
+
+	return (inter);
+}
+
+uint64_t
+kernel_va_to_slbv(vm_offset_t va)
+{
+	uint64_t esid, slbv;
+
 	esid = (uintptr_t)va >> ADDR_SR_SHFT;
-	slb->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
 
-	if (pm == kernel_pmap) {
-		/* Set kernel VSID to deterministic value */
-		slb->slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;
+	/* Set kernel VSID to deterministic value */
+	slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;
 
-		/* Figure out if this is a large-page mapping */
-		if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
-			/*
-			 * XXX: If we have set up a direct map, assumes
-			 * all physical memory is mapped with large pages.
-			 */
-			if (mem_valid(va, 0) == 0)
-				slb->slbv |= SLBV_L;
-		}
-
-		return (0);
+	/* Figure out if this is a large-page mapping */
+	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
+		/*
+		 * XXX: If we have set up a direct map, assumes
+		 * all physical memory is mapped with large pages.
+		 */
+		if (mem_valid(va, 0) == 0)
+			slbv |= SLBV_L;
 	}
+
+	return (slbv);
+}
 
-	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+struct slb *
+user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
+{
+	uint64_t esid = va >> ADDR_SR_SHFT;
+	struct slbtnode *ua;
+	int idx;
 
-	cont.slb.slbe = slb->slbe;
-	found = SPLAY_FIND(slb_tree, &pm->pm_slbtree, &cont);
+	ua = pm->pm_slb_tree_root;
 
-	if (found == NULL)
-		return (-1);
+	for (;;) {
+		KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
+		    ua->ua_base, ua->ua_level));
+		idx = esid2idx(esid, ua->ua_level);
 
-	slb->slbv = found->slb.slbv;
-	return (0);
+		/*
+		 * This code is specific to ppc64 where a load is
+		 * atomic, so no need for atomic_load macro.
+		 */
+		if (ua->ua_level == UAD_LEAF_LEVEL)
+			return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
+			    &ua->u.slb_entries[idx] : NULL);
+
+		ua = ua->u.ua_child[idx];
+		if (ua == NULL ||
+		    esid2base(esid, ua->ua_level) != ua->ua_base)
+			return (NULL);
+	}
+
+	return (NULL);
 }
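
[Aside: the powerpc_sync()/eieio() barriers in make_new_leaf() and
friends implement an initialize-then-publish pattern so that the
lock-free search above never observes a half-built node. A generic
sketch of the same idea, using a C11 release fence in place of the
kernel's PowerPC barrier wrappers; hypothetical code, not part of
either revision.]

	#include <stdatomic.h>
	#include <stdint.h>

	struct node {
		uint64_t payload;
	};

	struct parent {
		struct node *_Atomic child;
	};

	static void
	publish(struct parent *p, struct node *n, uint64_t val)
	{
		n->payload = val;	/* fully initialize first ... */
		/* ... make the stores above visible ... */
		atomic_thread_fence(memory_order_release);
		/* ... then link the node where readers can find it. */
		atomic_store_explicit(&p->child, n, memory_order_relaxed);
	}

	int
	main(void)
	{
		static struct node n;
		struct parent p = { NULL };

		publish(&p, &n, 42);
		return (0);
	}
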
 
 uint64_t
 va_to_vsid(pmap_t pm, vm_offset_t va)
 {
-	struct slb entry;
+	struct slb *entry;
 
 	/* Shortcut kernel case */
 	if (pm == kernel_pmap)
 		return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
 
 	/*
 	 * If there is no vsid for this VA, we need to add a new entry
 	 * to the PMAP's segment table.
 	 */
 
-	if (va_to_slb_entry(pm, va, &entry) != 0)
+	entry = user_va_to_slb_entry(pm, va);
+
+	if (entry == NULL)
 		return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));
 
-	return ((entry.slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
+	return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
 }
 
 uint64_t
 allocate_vsid(pmap_t pm, uint64_t esid, int large)
 {
-	uint64_t vsid;
-	struct slbcontainer *slb_entry, kern_entry;
-	struct slb *prespill;
+	uint64_t vsid, slbv;
+	struct slbtnode *ua, *next, *inter;
+	struct slb *slb;
+	int idx;
 
-	prespill = NULL;
+	KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));
 
-	if (pm == kernel_pmap) {
-		vsid = va_to_vsid(pm, esid << ADDR_SR_SHFT);
-		slb_entry = &kern_entry;
-		prespill = PCPU_GET(slb);
-	} else {
-		vsid = moea64_get_unique_vsid();
-		slb_entry = uma_zalloc(slb_zone, M_NOWAIT);
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+	vsid = moea64_get_unique_vsid();
 
-		if (slb_entry == NULL)
-			panic("Could not allocate SLB mapping!");
+	slbv = vsid << SLBV_VSID_SHIFT;
+	if (large)
+		slbv |= SLBV_L;
 
-		prespill = pm->pm_slb;
-	}
+	ua = pm->pm_slb_tree_root;
 
-	slb_entry->slb.slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
-	slb_entry->slb.slbv = vsid << SLBV_VSID_SHIFT;
+	/* Descend to the correct leaf or NULL pointer. */
+	for (;;) {
+		KASSERT(uad_baseok(ua),
+		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
+		idx = esid2idx(esid, ua->ua_level);
 
-	if (large)
-		slb_entry->slb.slbv |= SLBV_L;
+		if (ua->ua_level == UAD_LEAF_LEVEL) {
+			ua->u.slb_entries[idx].slbv = slbv;
+			eieio();
+			ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
+			    | SLBE_VALID;
+			setbit(&ua->ua_alloc, idx);
+			slb = &ua->u.slb_entries[idx];
+			break;
+		}
 
-	if (pm != kernel_pmap) {
-		PMAP_LOCK_ASSERT(pm, MA_OWNED);
-		SPLAY_INSERT(slb_tree, &pm->pm_slbtree, slb_entry);
+		next = ua->u.ua_child[idx];
+		if (next == NULL) {
+			slb = make_new_leaf(esid, slbv, ua);
+			break;
+		}
+
+		/*
+		 * Check if the next item down has an okay ua_base.
+		 * If not, we need to allocate an intermediate node.
+		 */
+		if (esid2base(esid, next->ua_level) != next->ua_base) {
+			inter = make_intermediate(esid, ua);
+			slb = make_new_leaf(esid, slbv, inter);
+			break;
+		}
+
+		ua = next;
 	}
 
 	/*
 	 * Someone probably wants this soon, and it may be a wired
 	 * SLB mapping, so pre-spill this entry.
 	 */
-	if (prespill != NULL)
-		slb_insert(pm, prespill, &slb_entry->slb);
+	eieio();
+	slb_insert(pm, pm->pm_slb, slb);
 
 	return (vsid);
 }
 
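[Aside: a compact userland model of the three descent cases the loop
above distinguishes (existing leaf, empty child slot, child whose base
does not cover the new key); hypothetical code, not part of either
revision, and without the kernel's barriers, locking, or
allocation-failure handling.]

	#include <stdint.h>
	#include <stdlib.h>

	#define LEAF_LEVEL	0

	struct tnode {
		int		level;
		uint64_t	base;
		union {
			struct tnode	*child[16];
			uint64_t	val[16];
		} u;
	};

	static int
	idx_at(uint64_t key, int level)
	{
		return ((key >> (level * 4)) & 0xF);
	}

	static uint64_t
	base_at(uint64_t key, int level)
	{
		return (key & ~((1ULL << ((level + 1) * 4)) - 1));
	}

	static struct tnode *
	new_node(int level, uint64_t key)
	{
		struct tnode *n = calloc(1, sizeof(*n));

		n->level = level;
		n->base = base_at(key, level);
		return (n);
	}

	void
	trie_insert(struct tnode *root, uint64_t key, uint64_t val)
	{
		struct tnode *n, *next, *inter, *leaf;
		int level;

		for (n = root;;) {
			if (n->level == LEAF_LEVEL) {
				/* Case 1: landed on an existing leaf. */
				n->u.val[idx_at(key, LEAF_LEVEL)] = val;
				return;
			}
			next = n->u.child[idx_at(key, n->level)];
			if (next == NULL) {
				/* Case 2: empty slot; hang a new leaf. */
				leaf = new_node(LEAF_LEVEL, key);
				leaf->u.val[idx_at(key, LEAF_LEVEL)] = val;
				n->u.child[idx_at(key, n->level)] = leaf;
				return;
			}
			if (base_at(key, next->level) != next->base) {
				/*
				 * Case 3: prefixes diverge; splice in an
				 * intermediate node covering both, then
				 * hang a new leaf off it.
				 */
				level = next->level + 1;
				while (base_at(key, level) !=
				    base_at(next->base, level))
					level++;
				inter = new_node(level, key);
				inter->u.child[idx_at(next->base, level)] = next;
				leaf = new_node(LEAF_LEVEL, key);
				leaf->u.val[idx_at(key, LEAF_LEVEL)] = val;
				inter->u.child[idx_at(key, level)] = leaf;
				n->u.child[idx_at(key, n->level)] = inter;
				return;
			}
			n = next;
		}
	}

	int
	main(void)
	{
		struct tnode *root = new_node(8, 0);

		trie_insert(root, 0x123456789ULL, 1);	/* case 2 */
		trie_insert(root, 0x123456780ULL, 2);	/* case 1 */
		trie_insert(root, 0x123406789ULL, 3);	/* case 3 */
		return (0);
	}
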
+void
+free_vsid(pmap_t pm, uint64_t esid, int large)
+{
+	struct slbtnode *ua;
+	int idx;
+
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+
+	ua = pm->pm_slb_tree_root;
+	/* Descend to the correct leaf. */
+	for (;;) {
+		KASSERT(uad_baseok(ua),
+		    ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
+
+		idx = esid2idx(esid, ua->ua_level);
+		if (ua->ua_level == UAD_LEAF_LEVEL) {
+			ua->u.slb_entries[idx].slbv = 0;
+			eieio();
+			ua->u.slb_entries[idx].slbe = 0;
+			clrbit(&ua->ua_alloc, idx);
+			return;
+		}
+
+		ua = ua->u.ua_child[idx];
+		if (ua == NULL ||
+		    esid2base(esid, ua->ua_level) != ua->ua_base) {
+			/* Perhaps just return instead of assert? */
+			KASSERT(0,
+			    ("Asked to remove an entry that was never inserted!"));
+			return;
+		}
+	}
+}
+
+static void
+free_slb_tree_node(struct slbtnode *ua)
+{
+	int idx;
+
+	for (idx = 0; idx < 16; idx++) {
+		if (ua->ua_level != UAD_LEAF_LEVEL) {
+			if (ua->u.ua_child[idx] != NULL)
+				free_slb_tree_node(ua->u.ua_child[idx]);
+		} else {
+			if (ua->u.slb_entries[idx].slbv != 0)
+				moea64_release_vsid(ua->u.slb_entries[idx].slbv
+				    >> SLBV_VSID_SHIFT);
+		}
+	}
+
+	uma_zfree(slbt_zone, ua);
+}
+
+void
+slb_free_tree(pmap_t pm)
+{
+
+	free_slb_tree_node(pm->pm_slb_tree_root);
+}
+
+struct slbtnode *
+slb_alloc_tree(void)
+{
+	struct slbtnode *root;
+
+	root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
+	root->ua_level = UAD_ROOT_LEVEL;
+
+	return (root);
+}
+
 /* Lock entries mapping kernel text and stacks */
 
 #define SLB_SPILLABLE(slbe) \
 	(((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
 	    (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
 	    (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
 void
 slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
--- 42 unchanged lines hidden ---
 		__asm __volatile ("slbmte %0, %1" ::
 		    "r"(slbcache[to_spill].slbv),
 		    "r"(slbcache[to_spill].slbe));
 	}
 
 	critical_exit();
 }
 
-int
-vsid_to_esid(pmap_t pm, uint64_t vsid, uint64_t *esid)
-{
-	uint64_t slbv;
-	struct slbcontainer *entry;
 
-#ifdef INVARIANTS
-	if (pm == kernel_pmap)
-		panic("vsid_to_esid only works on user pmaps");
-
-	PMAP_LOCK_ASSERT(pm, MA_OWNED);
-#endif
-
-	slbv = vsid << SLBV_VSID_SHIFT;
-
-	SPLAY_FOREACH(entry, slb_tree, &pm->pm_slbtree) {
-		if (slbv == entry->slb.slbv) {
-			*esid = entry->slb.slbe >> SLBE_ESID_SHIFT;
-			return (0);
-		}
-	}
-
-	return (-1);
-}
-
-void
-free_vsids(pmap_t pm)
-{
-	struct slbcontainer *entry;
-
-	while (!SPLAY_EMPTY(&pm->pm_slbtree)) {
-		entry = SPLAY_MIN(slb_tree, &pm->pm_slbtree);
-
-		SPLAY_REMOVE(slb_tree, &pm->pm_slbtree, entry);
-
-		moea64_release_vsid(entry->slb.slbv >> SLBV_VSID_SHIFT);
-		uma_zfree(slb_zone, entry);
-	}
-}
-
-static int
-slb_compare(struct slbcontainer *a, struct slbcontainer *b)
-{
-	if (a->slb.slbe == b->slb.slbe)
-		return (0);
-	else if (a->slb.slbe < b->slb.slbe)
-		return (-1);
-	else
-		return (1);
-}
-
 static void
 slb_zone_init(void *dummy)
 {
 
-	slb_zone = uma_zcreate("SLB segment", sizeof(struct slbcontainer),
+	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 }
 
 struct slb *
 slb_alloc_user_cache(void)
 {
 	return (uma_zalloc(slb_cache_zone, M_ZERO));
 }
 
 void
 slb_free_user_cache(struct slb *slb)
 {
 	uma_zfree(slb_cache_zone, slb);
 }