--- slb.c	(212722)
+++ slb.c	(214574)
 /*-
  * Copyright (c) 2010 Nathan Whitehorn
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *

--- 9 unchanged lines hidden ---

  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * $FreeBSD: head/sys/powerpc/aim/slb.c 212722 2010-09-16 03:46:17Z nwhitehorn $
+ * $FreeBSD: head/sys/powerpc/aim/slb.c 214574 2010-10-30 23:07:30Z nwhitehorn $
  */

 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/systm.h>

--- 160 unchanged lines hidden ---

 uint64_t
 kernel_va_to_slbv(vm_offset_t va)
 {
 	uint64_t esid, slbv;

 	esid = (uintptr_t)va >> ADDR_SR_SHFT;

 	/* Set kernel VSID to deterministic value */
-	slbv = va_to_vsid(kernel_pmap, va) << SLBV_VSID_SHIFT;
+	slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;

 	/* Figure out if this is a large-page mapping */
 	if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) {
 		/*
 		 * XXX: If we have set up a direct map, assume
 		 * all physical memory is mapped with large pages.
 		 */
 		if (mem_valid(va, 0) == 0)

--- 204 unchanged lines hidden ---
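
The kernel_va_to_slbv() hunk above stops looking the kernel VSID up in
kernel_pmap via va_to_vsid() and instead derives it directly from the ESID
with KERNEL_VSID(), so a kernel SLB fault can be resolved without touching
pmap data structures. A minimal user-space sketch of the idea follows; the
demo_* names and the mixing constant are illustrative assumptions, not the
real KERNEL_VSID() definition:

	#include <stdint.h>

	#define DEMO_ADDR_SR_SHFT	28	/* 256 MB segments on AIM PowerPC */

	/*
	 * Hypothetical ESID-to-VSID mix. Any fixed function works here,
	 * as long as every CPU computes the same VSID for a kernel ESID.
	 */
	static uint64_t
	demo_kernel_vsid(uint64_t esid)
	{
		return (((esid << 8) | (esid >> 28)) * 0x13bbULL);
	}

	uint64_t
	demo_kernel_va_to_vsid(uint64_t va)
	{
		return (demo_kernel_vsid(va >> DEMO_ADDR_SR_SHFT));
	}

Because the mapping is deterministic, the SLB-miss path needs no shared
lookup table and therefore no locking to translate a kernel address.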

 	struct slb *slbcache;
 	int i, j;

 	/* We don't want to be preempted while modifying the kernel map */
 	critical_enter();

 	slbcache = PCPU_GET(slb);

-	/* Check for an unused slot, abusing the USER_SR slot as a full flag */
-	if (slbcache[USER_SR].slbe == 0) {
-		for (i = 0; i < USER_SR; i++) {
+	/* Check for an unused slot, abusing the user slot as a full flag */
+	if (slbcache[USER_SLB_SLOT].slbe == 0) {
+		for (i = 0; i < USER_SLB_SLOT; i++) {
 			if (!(slbcache[i].slbe & SLBE_VALID))
 				goto fillkernslb;
 		}

-		if (i == USER_SR)
-			slbcache[USER_SR].slbe = 1;
+		if (i == USER_SLB_SLOT)
+			slbcache[USER_SLB_SLOT].slbe = 1;
 	}

 	for (i = mftb() % 64, j = 0; j < 64; j++, i = (i+1) % 64) {
-		if (i == USER_SR)
+		if (i == USER_SLB_SLOT)
 			continue;

 		if (SLB_SPILLABLE(slbcache[i].slbe))
 			break;
 	}

 	KASSERT(j < 64, ("All kernel SLB slots locked!"));

--- 53 unchanged lines hidden ---
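
The hunks in the kernel SLB insertion path above rename USER_SR to
USER_SLB_SLOT: one SLB entry is reserved for the user segment and, as the
comment notes, its slbe word doubles as a "cache is full" flag. When no
invalid entry is free, the victim scan starts at a pseudo-random slot
derived from the timebase (mftb() % 64) and probes all 64 entries
circularly for a spillable one, skipping the reserved slot. A minimal
user-space sketch of that scan, with rand() standing in for mftb() and the
demo_* names as assumptions rather than kernel identifiers:

	#include <assert.h>
	#include <stdlib.h>

	#define DEMO_NSLOTS	64	/* entries in this CPU's SLB */
	#define DEMO_USER_SLOT	63	/* assumed index of the reserved slot */

	static int
	demo_pick_victim(const int spillable[DEMO_NSLOTS])
	{
		int i, j;

		for (i = rand() % DEMO_NSLOTS, j = 0; j < DEMO_NSLOTS;
		    j++, i = (i + 1) % DEMO_NSLOTS) {
			if (i == DEMO_USER_SLOT)
				continue;	/* never evict the user slot */
			if (spillable[i])
				return (i);	/* first spillable entry wins */
		}
		assert(0 && "All kernel SLB slots locked!");
		return (-1);
	}

Randomizing the starting index spreads evictions across the whole SLB
instead of always recycling the low-numbered slots.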