--- slb.c	(217451)
+++ slb.c	(222620)
 /*-
  * Copyright (c) 2010 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
[... 9 unchanged lines hidden ...]
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: head/sys/powerpc/aim/slb.c 217451 2011-01-15 19:16:05Z andreast $
+ * $FreeBSD: head/sys/powerpc/aim/slb.c 222620 2011-06-02 14:25:52Z nwhitehorn $
  */

 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/systm.h>
[... 11 unchanged lines hidden ...]
 #include <machine/platform.h>
 #include <machine/pmap.h>
 #include <machine/vmparam.h>

 uintptr_t moea64_get_unique_vsid(void);
 void moea64_release_vsid(uint64_t vsid);
 static void slb_zone_init(void *);

-uma_zone_t slbt_zone;
-uma_zone_t slb_cache_zone;
+static uma_zone_t slbt_zone;
+static uma_zone_t slb_cache_zone;
+int n_slbs = 64;

 SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);

 struct slbtnode {
 	uint16_t	ua_alloc;
 	uint8_t		ua_level;
 	/* Only 36 bits needed for full 64-bit address space. */
 	uint64_t	ua_base;
[... 357 unchanged lines hidden ...]

 	/* We don't want to be preempted while modifying the kernel map */
 	critical_enter();

 	slbcache = PCPU_GET(slb);

 	/* Check for an unused slot, abusing the user slot as a full flag */
 	if (slbcache[USER_SLB_SLOT].slbe == 0) {
-		for (i = 0; i < USER_SLB_SLOT; i++) {
+		for (i = 0; i < n_slbs; i++) {
+			if (i == USER_SLB_SLOT)
+				continue;
 			if (!(slbcache[i].slbe & SLBE_VALID))
 				goto fillkernslb;
 		}

-		if (i == USER_SLB_SLOT)
+		if (i == n_slbs)
 			slbcache[USER_SLB_SLOT].slbe = 1;
 	}

-	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
+	for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i+1) % n_slbs) {
 		if (i == USER_SLB_SLOT)
 			continue;

 		if (SLB_SPILLABLE(slbcache[i].slbe))
 			break;
 	}

-	KASSERT(j < 64, ("All kernel SLB slots locked!"));
+	KASSERT(j < n_slbs, ("All kernel SLB slots locked!"));

 fillkernslb:
+	KASSERT(i != USER_SLB_SLOT,
+	    ("Filling user SLB slot with a kernel mapping"));
 	slbcache[i].slbv = slbv;
 	slbcache[i].slbe = slbe | (uint64_t)i;

 	/* If it is for this CPU, put it in the SLB right away */
 	if (pmap_bootstrapped) {
 		/* slbie not required */
 		__asm __volatile ("slbmte %0, %1" ::
 		    "r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
[... 4 unchanged lines hidden ...]

 void
 slb_insert_user(pmap_t pm, struct slb *slb)
 {
 	int i;

 	PMAP_LOCK_ASSERT(pm, MA_OWNED);

-	if (pm->pm_slb_len < 64) {
+	if (pm->pm_slb_len < n_slbs) {
 		i = pm->pm_slb_len;
 		pm->pm_slb_len++;
 	} else {
-		i = mftb() % 64;
+		i = mftb() % n_slbs;
 	}

 	/* Note that this replacement is atomic with respect to trap_subr */
 	pm->pm_slb[i] = slb;
 }

 static void *
 slb_uma_real_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
[... 34 unchanged lines hidden ...]
 }

 static void
 slb_zone_init(void *dummy)
 {

 	slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
-	slb_cache_zone = uma_zcreate("SLB cache", 64*sizeof(struct slb *),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
+	slb_cache_zone = uma_zcreate("SLB cache",
+	    (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
+	    UMA_ALIGN_PTR, UMA_ZONE_VM);

 	if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
 		uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
 		uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
 	}
 }

 struct slb **
 slb_alloc_user_cache(void)
 {
 	return (uma_zalloc(slb_cache_zone, M_ZERO));
 }

 void
 slb_free_user_cache(struct slb **slb)
 {
 	uma_zfree(slb_cache_zone, slb);
 }
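The hunks above all follow one theme: every place that hard-coded the 64-entry SLB now consults the new global n_slbs (still initialized to 64), and the kernel-slot scan explicitly skips USER_SLB_SLOT. The following is a minimal userland sketch of that slot-selection policy, not the kernel code itself: the demo array size, the stub mftb(), the SLBE_VALID bit value, the USER_SLB_SLOT index, and the simplified spill test standing in for SLB_SPILLABLE() are all assumptions made for illustration.

/*
 * Minimal userland sketch of the slot-selection policy in the patched
 * slb_insert_kernel(): prefer a free (invalid) slot, skipping the
 * reserved user slot, else spill a pseudo-randomly chosen victim
 * modulo n_slbs.  The constants below are demo assumptions only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define	SLBE_VALID	0x08000000UL	/* assumed flag bit for the demo */
#define	USER_SLB_SLOT	63		/* assumed reserved slot index */
#define	MAX_SLBS	64

static int n_slbs = 64;			/* the new variable; default matches the old constant */

static uint64_t
mftb(void)				/* stand-in for reading the timebase register */
{
	return ((uint64_t)rand());
}

static int
pick_kernel_slot(const uint64_t *slbe)
{
	int i, j;

	/* First pass: look for an unused slot, never the user slot. */
	for (i = 0; i < n_slbs; i++) {
		if (i == USER_SLB_SLOT)
			continue;
		if (!(slbe[i] & SLBE_VALID))
			return (i);
	}

	/* All valid: pick a spill victim, again avoiding the user slot. */
	for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i + 1) % n_slbs) {
		if (i != USER_SLB_SLOT)	/* simplified SLB_SPILLABLE() test */
			break;
	}
	return (i);
}

int
main(void)
{
	uint64_t slbe[MAX_SLBS] = { 0 };

	slbe[0] |= SLBE_VALID;		/* pretend slot 0 is already in use */
	printf("selected kernel SLB slot %d\n", pick_kernel_slot(slbe));
	return (0);
}

Compared with r217451, the only behavioral change visible in the diff is that the loop bounds and the spill modulus come from n_slbs rather than the literal 64, plus the explicit skip of USER_SLB_SLOT in the first pass and the new KASSERT guarding the fillkernslb path.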