slb.c (227568) → slb.c (230123)
/*-
 * Copyright (c) 2010 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *

--- 9 unchanged lines hidden ---

 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/aim/slb.c 227568 2011-11-16 16:46:09Z alc $
 * $FreeBSD: head/sys/powerpc/aim/slb.c 230123 2012-01-15 00:08:14Z nwhitehorn $
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

--- 369 unchanged lines hidden ---

        root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
        root->ua_level = UAD_ROOT_LEVEL;

        return (root);
}

/* Lock entries mapping kernel text and stacks */

slb.c (227568):

#define SLB_SPILLABLE(slbe) \
        (((slbe & SLBE_ESID_MASK) < VM_MIN_KERNEL_ADDRESS && \
            (slbe & SLBE_ESID_MASK) > 16*SEGMENT_LENGTH) || \
            (slbe & SLBE_ESID_MASK) > VM_MAX_KERNEL_ADDRESS)
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
        struct slb *slbcache;
        int i, j;

        /* We don't want to be preempted while modifying the kernel map */
        critical_enter();

        slbcache = PCPU_GET(slb);

        /* Check for an unused slot, abusing the user slot as a full flag */
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        for (i = mftb() % n_slbs, j = 0; j < n_slbs; j++, i = (i+1) % n_slbs) {
                if (i == USER_SLB_SLOT)
                        continue;

                if (SLB_SPILLABLE(slbcache[i].slbe))
                        break;
        }

        KASSERT(j < n_slbs, ("All kernel SLB slots locked!"));

fillkernslb:
        KASSERT(i != USER_SLB_SLOT,
            ("Filling user SLB slot with a kernel mapping"));
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* If it is for this CPU, put it in the SLB right away */
        if (pmap_bootstrapped) {

--- 94 unchanged lines hidden ---

slb.c (230123):

void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
        struct slb *slbcache;
        int i;

        /* We don't want to be preempted while modifying the kernel map */
        critical_enter();

        slbcache = PCPU_GET(slb);

        /* Check for an unused slot, abusing the user slot as a full flag */
        if (slbcache[USER_SLB_SLOT].slbe == 0) {
                for (i = 0; i < n_slbs; i++) {
                        if (i == USER_SLB_SLOT)
                                continue;
                        if (!(slbcache[i].slbe & SLBE_VALID))
                                goto fillkernslb;
                }

                if (i == n_slbs)
                        slbcache[USER_SLB_SLOT].slbe = 1;
        }

        i = mftb() % n_slbs;
        if (i == USER_SLB_SLOT)
                i = (i+1) % n_slbs;

fillkernslb:
        KASSERT(i != USER_SLB_SLOT,
            ("Filling user SLB slot with a kernel mapping"));
        slbcache[i].slbv = slbv;
        slbcache[i].slbe = slbe | (uint64_t)i;

        /* If it is for this CPU, put it in the SLB right away */
        if (pmap_bootstrapped) {

--- 94 unchanged lines hidden ---
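
For readers who want the behavioural difference in isolation: below is a minimal user-space sketch of the victim-slot selection as it stands in r230123, where the SLB_SPILLABLE() scan of r227568 is replaced by a timebase-derived slot that simply steps past the reserved user slot. This is not kernel code; N_SLBS, USER_SLB_SLOT and mftb_stub() are illustrative stand-ins for the kernel's n_slbs, USER_SLB_SLOT and mftb(), with assumed values.

/*
 * Standalone sketch (not FreeBSD kernel code) of the r230123 slot choice.
 * N_SLBS, USER_SLB_SLOT and the timebase stub are assumptions, not the
 * kernel's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define N_SLBS          64      /* assumed SLB entry count */
#define USER_SLB_SLOT   1       /* assumed reserved slot index */

/* Stand-in for the PowerPC mftb (move-from-timebase) instruction. */
static uint64_t
mftb_stub(void)
{
        static uint64_t tb = 0x123456789abcULL;

        return (tb += 7);
}

int
main(void)
{
        int hits[N_SLBS] = { 0 };
        int i, n;

        for (n = 0; n < 10000; n++) {
                /* The r230123 selection logic, verbatim apart from the stub. */
                i = mftb_stub() % N_SLBS;
                if (i == USER_SLB_SLOT)
                        i = (i + 1) % N_SLBS;
                hits[i]++;
        }

        /* The user slot is never evicted; its neighbour absorbs its share. */
        printf("slot %d: %d evictions\n", USER_SLB_SLOT, hits[USER_SLB_SLOT]);
        printf("slot %d: %d evictions\n", (USER_SLB_SLOT + 1) % N_SLBS,
            hits[(USER_SLB_SLOT + 1) % N_SLBS]);
        return (0);
}

Run over many iterations, the user slot records no evictions and the slot immediately after it absorbs roughly twice the average share, since it receives its own residue plus the user slot's.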