slb.c: diff between revisions 212715 (Deleted) and 212722 (Added)
1/*-
2 * Copyright (c) 2010 Nathan Whitehorn
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *

--- 9 unchanged lines hidden (view full) ---

18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
Deleted:
26 * $FreeBSD: head/sys/powerpc/aim/slb.c 212715 2010-09-16 00:22:25Z nwhitehorn $
Added:
26 * $FreeBSD: head/sys/powerpc/aim/slb.c 212722 2010-09-16 03:46:17Z nwhitehorn $
27 */
28
29#include <sys/param.h>
30#include <sys/kernel.h>
31#include <sys/lock.h>
32#include <sys/mutex.h>
33#include <sys/proc.h>
34#include <sys/systm.h>

--- 223 unchanged lines hidden (view full) ---

258 /*
259 * If there is no vsid for this VA, we need to add a new entry
260 * to the PMAP's segment table.
261 */
262
263 entry = user_va_to_slb_entry(pm, va);
264
265 if (entry == NULL)
Deleted:
266 return (allocate_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0));
Added:
266 return (allocate_user_vsid(pm,
267     (uintptr_t)va >> ADDR_SR_SHFT, 0));
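For reference, the esid argument passed here is just the 256 MB segment index of the faulting address. A minimal sketch of that derivation, not part of the diff, assuming ADDR_SR_SHFT is 28 as in the AIM headers (the helper name is made up for illustration):

static uint64_t
va_to_esid_sketch(vm_offset_t va)
{
	/* e.g. va == 0x00003fff2468a000 gives esid == 0x3fff2 with a shift of 28 */
	return ((uintptr_t)va >> ADDR_SR_SHFT);
}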
267
268 return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
269}
270
271uint64_t
Deleted:
272allocate_vsid(pmap_t pm, uint64_t esid, int large)
Added:
273allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
273{
274 uint64_t vsid, slbv;
275 struct slbtnode *ua, *next, *inter;
276 struct slb *slb;
277 int idx;
278
279 KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));
280

--- 41 unchanged lines hidden (view full) ---

322 ua = next;
323 }
324
325 /*
326 * Someone probably wants this soon, and it may be a wired
327 * SLB mapping, so pre-spill this entry.
328 */
329 eieio();
Deleted:
330 slb_insert(pm, pm->pm_slb, slb);
Added:
331 slb_insert_user(pm, slb);
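The call site changes because, from this revision on, the per-process SLB cache holds pointers into the pmap's slbtnode tree rather than copies of the entries. A hedged sketch of the two layouts, not part of the diff, using the pm_slb/pm_slb_len names that appear in slb_insert_user() further down; the real fields live in the machine-dependent pmap structure:

struct pmap_slb_before {		/* prior to r212722 */
	struct slb	*pm_slb;	/* array of 64 copied entries */
};

struct pmap_slb_after {			/* r212722 and later */
	struct slb	**pm_slb;	/* 64 pointers into the slbtnode tree */
	int		pm_slb_len;	/* slots filled so far */
};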
331
332 return (vsid);
333}
334
335void
336free_vsid(pmap_t pm, uint64_t esid, int large)
337{
338 struct slbtnode *ua;

--- 66 unchanged lines hidden (view full) ---

405
406/* Lock entries mapping kernel text and stacks */
407
408#define SLB_SPILLABLE(slbe) \
409 (((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
410 (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
411 (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
412void
Deleted:
413slb_insert(pmap_t pm, struct slb *slbcache, struct slb *slb_entry)
414{
415 uint64_t slbe, slbv;
416 int i, j, to_spill;
417
418 /* We don't want to be preempted while modifying the kernel map */
419 critical_enter();
420
421 to_spill = -1;
422 slbv = slb_entry->slbv;
423 slbe = slb_entry->slbe;
424
425 /* Hunt for a likely candidate */
426
427 for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
428 if (pm == kernel_pmap && i == USER_SR)
429 continue;
430
431 if (!(slbcache[i].slbe & SLBE_VALID)) {
432 to_spill = i;
433 break;
434 }
435
436 if (to_spill < 0 && (pm != kernel_pmap ||
437     SLB_SPILLABLE(slbcache[i].slbe)))
438 to_spill = i;
439 }
440
441 if (to_spill < 0)
442 panic("SLB spill on ESID %#lx, but no available candidates!\n",
443    (slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT);
444
445 if (slbcache[to_spill].slbe & SLBE_VALID) {
446 /* Invalidate this first to avoid races */
447 slbcache[to_spill].slbe = 0;
448 mb();
449 }
450 slbcache[to_spill].slbv = slbv;
451 slbcache[to_spill].slbe = slbe | (uint64_t)to_spill;
452
453 /* If it is for this CPU, put it in the SLB right away */
454 if (pm == kernel_pmap && pmap_bootstrapped) {
455 /* slbie not required */
456 __asm __volatile ("slbmte %0, %1" ::
457     "r"(slbcache[to_spill].slbv),
458     "r"(slbcache[to_spill].slbe));
459 }
460
461 critical_exit();
462}
463
464
Added:
414slb_insert_kernel(uint64_t slbe, uint64_t slbv)
415{
416 struct slb *slbcache;
417 int i, j;
418
419 /* We don't want to be preempted while modifying the kernel map */
420 critical_enter();
421
422 slbcache = PCPU_GET(slb);
423
424 /* Check for an unused slot, abusing the USER_SR slot as a full flag */
425 if (slbcache[USER_SR].slbe == 0) {
426 for (i = 0; i < USER_SR; i++) {
427 if (!(slbcache[i].slbe & SLBE_VALID))
428 goto fillkernslb;
429 }
430
431 if (i == USER_SR)
432 slbcache[USER_SR].slbe = 1;
433 }
434
435 for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
436 if (i == USER_SR)
437 continue;
438
439 if (SLB_SPILLABLE(slbcache[i].slbe))
440 break;
441 }
442
443 KASSERT(j < 64, ("All kernel SLB slots locked!"));
444
445fillkernslb:
446 slbcache[i].slbv = slbv;
447 slbcache[i].slbe = slbe | (uint64_t)i;
448
449 /* If it is for this CPU, put it in the SLB right away */
450 if (pmap_bootstrapped) {
451 /* slbie not required */
452 __asm __volatile ("slbmte %0, %1" ::
453     "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
454 }
455
456 critical_exit();
457}
458
459void
460slb_insert_user(pmap_t pm, struct slb *slb)
461{
462 int i;
463
464 PMAP_LOCK_ASSERT(pm, MA_OWNED);
465
466 if (pm->pm_slb_len < 64) {
467 i = pm->pm_slb_len;
468 pm->pm_slb_len++;
469 } else {
470 i = mftb() % 64;
471 }
472
473 /* Note that this replacement is atomic with respect to trap_subr */
474 pm->pm_slb[i] = slb;
475}
476
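The old slb_insert() served both the per-CPU kernel cache and the per-process cache and told them apart with pm == kernel_pmap checks; it is now split into slb_insert_kernel(), which takes raw slbe/slbv values and touches only the PCPU cache, and slb_insert_user(), which stores a pointer under the pmap lock. A hedged usage sketch, not part of the diff, spelling out the encoding with the SLBE_ and SLBV_ macros this file already references; real callers elsewhere in the tree may build these values differently:

static void
slb_insert_sketch(pmap_t pm, vm_offset_t va, uint64_t vsid)
{
	struct slb *entry;
	uint64_t slbe, slbv;

	if (pm == kernel_pmap) {
		/* Kernel mappings: per-CPU cache, no pmap lock required. */
		slbe = (((uint64_t)va >> ADDR_SR_SHFT) << SLBE_ESID_SHIFT) |
		    SLBE_VALID;
		slbv = vsid << SLBV_VSID_SHIFT;
		slb_insert_kernel(slbe, slbv);
		return;
	}

	/* User mappings: take the pmap lock and hand over the tree entry. */
	PMAP_LOCK(pm);
	entry = user_va_to_slb_entry(pm, va);
	if (entry != NULL)
		slb_insert_user(pm, entry);
	PMAP_UNLOCK(pm);
}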
465static void
466slb_zone_init(void *dummy)
467{
468
469 slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
470 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
Deleted:
471 slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb),
Added:
483 slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),

484     NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
485}
486
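With the zone item now an array of 64 pointers, the allocation shrinks (512 bytes instead of 1 KB on LP64, assuming struct slb is just the slbv/slbe pair) and consumers reach entries through one extra dereference. A hedged sketch of a lookup over the new layout, not part of the diff; the helper name and search policy are illustrative only:

static struct slb *
user_cache_lookup_sketch(pmap_t pm, uint64_t esid)
{
	struct slb **cache = pm->pm_slb;
	int i;

	/* Unused slots are NULL: slb_alloc_user_cache() zeroes the array. */
	for (i = 0; i < pm->pm_slb_len; i++) {
		if (cache[i] != NULL &&
		    ((cache[i]->slbe & SLBE_ESID_MASK) >> SLBE_ESID_SHIFT) == esid)
			return (cache[i]);
	}
	return (NULL);
}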
Deleted:
475struct slb *
Added:
487struct slb **

488slb_alloc_user_cache(void)
489{
490 return (uma_zalloc(slb_cache_zone, M_ZERO));
491}
492
493void
Deleted:
482slb_free_user_cache(struct slb *slb)
Added:
494slb_free_user_cache(struct slb **slb)

495{
496 uma_zfree(slb_cache_zone, slb);
497}
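The allocator and free routines change type to match: callers now keep a struct slb ** in the pmap. A hedged sketch of the expected pairing, not part of the diff; the function names are illustrative and the real call sites live in the pmap code:

static void
pmap_slb_setup_sketch(pmap_t pm)
{
	pm->pm_slb = slb_alloc_user_cache();	/* zeroed array of 64 pointers */
	pm->pm_slb_len = 0;
}

static void
pmap_slb_teardown_sketch(pmap_t pm)
{
	slb_free_user_cache(pm->pm_slb);
	pm->pm_slb = NULL;
}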